/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2012 Emulex. All rights reserved.            *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID. See the GNU General Public License for   *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/aer.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"

/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB,
	LPFC_UNSOL_IOCB,
	LPFC_SOL_IOCB,
	LPFC_ABORT_IOCB
} lpfc_iocb_type;


/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
				  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
			      uint8_t *, uint32_t *);
static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
							 struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
				      struct hbq_dmabuf *);
static int lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *, struct lpfc_queue *,
				    struct lpfc_cqe *);
static int lpfc_sli4_post_els_sgl_list(struct lpfc_hba *, struct list_head *,
				       int);

static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
	return &iocbq->iocb;
}

/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful. If no entries are available on @q then this function will return
 * -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
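 *
 * Illustrative usage sketch (not part of this routine), assuming the caller
 * already holds the hbalock and has built a WQE; the queue pointer shown is
 * hypothetical:
 *
 *	rc = lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe);
 *	if (rc == -ENOMEM)
 *		(no free entry; retry after the HBA consumes some entries)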
 **/
static uint32_t
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
{
	union lpfc_wqe *temp_wqe;
	struct lpfc_register doorbell;
	uint32_t host_index;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_wqe = q->qe[q->host_index].wqe;

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	/* set consumption flag every once in a while */
	if (!((q->host_index + 1) % q->entry_repost))
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
	if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
		bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
	lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);

	/* Update the host index before invoking device */
	host_index = q->host_index;
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_wq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_wq_doorbell_index, &doorbell, host_index);
	bf_set(lpfc_wq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.WQDBregaddr);
	readl(q->phba->sli4_hba.WQDBregaddr); /* Flush */

	return 0;
}

/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
	uint32_t released = 0;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	if (q->hba_index == index)
		return 0;
	do {
		q->hba_index = ((q->hba_index + 1) % q->entry_count);
		released++;
	} while (q->hba_index != index);
	return released;
}

/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the Mailbox Queue Entry. This function returns
 * 0 if successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
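 *
 * Illustrative usage sketch (not part of this routine), assuming a prepared
 * mailbox queue entry and the hbalock held; the queue pointer shown is
 * hypothetical:
 *
 *	rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, &mqe);
 *	if (rc == -ENOMEM)
 *		(mailbox queue full or not initialized)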
 **/
static uint32_t
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
	struct lpfc_mqe *temp_mqe;
	struct lpfc_register doorbell;
	uint32_t host_index;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_mqe = q->qe[q->host_index].mqe;

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
	/* Save off the mailbox pointer for completion */
	q->phba->mbox = (MAILBOX_t *)temp_mqe;

	/* Update the host index before invoking device */
	host_index = q->host_index;
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
	readl(q->phba->sli4_hba.MQDBregaddr); /* Flush */
	return 0;
}

/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	/* Clear the mailbox pointer for completion */
	q->phba->mbox = NULL;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return 1;
}

/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
	struct lpfc_eqe *eqe;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	eqe = q->qe[q->hba_index].eqe;

	/* If the next EQE is not valid then we are done */
	if (!bf_get_le32(lpfc_eqe_valid, eqe))
		return NULL;
	/* If the host has not yet processed the next entry then we are done */
	if (((q->hba_index + 1) % q->entry_count) == q->host_index)
		return NULL;

	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return eqe;
}

/**
 * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
 * @q: The Event Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will mark all Event Queue Entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each completion queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
 * The internal host index in the @q will be updated by this routine to indicate
 * that the host has finished processing the entries. The @arm parameter
 * indicates that the queue should be rearmed when ringing the doorbell.
 *
 * This function will return the number of EQEs that were popped.
 **/
uint32_t
lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_eqe *temp_eqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		temp_eqe = q->qe[q->host_index].eqe;
		bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm) {
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	}
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQCQDBregaddr);
	return released;
}

/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q, update
 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
	struct lpfc_cqe *cqe;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;

	/* If the next CQE is not valid then we are done */
	if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
		return NULL;
	/* If the host has not yet processed the next entry then we are done */
	if (((q->hba_index + 1) % q->entry_count) == q->host_index)
		return NULL;

	cqe = q->qe[q->hba_index].cqe;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return cqe;
}

/**
 * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will mark all Completion queue entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each completion queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
 * The internal host index in the @q will be updated by this routine to indicate
 * that the host has finished processing the entries. The @arm parameter
 * indicates that the queue should be rearmed when ringing the doorbell.
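 *
 * Illustrative sketch (not part of this routine): a completion handler
 * typically drains the CQ with lpfc_sli4_cq_get() and then calls this
 * routine once to pop the entries and optionally re-arm, e.g.
 *
 *	while ((cqe = lpfc_sli4_cq_get(cq)) != NULL)
 *		(process one completion entry)
 *	lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);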
 *
 * This function will return the number of CQEs that were released.
 **/
uint32_t
lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_cqe *temp_qe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;
	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		temp_qe = q->qe[q->host_index].cqe;
		bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
	bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
	       (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
	return released;
}

/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The Header Receive Queue Entry to put on the header queue.
 * @drqe: The Data Receive Queue Entry to put on the data queue.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next
 * available entries on @hq and @dq. This function will then ring the Receive
 * Queue Doorbell to signal the HBA to start processing the Receive Queue
 * Entries. This function returns the index that the rqe was copied to if
 * successful. If no entries are available on the queues then this function
 * will return -EBUSY.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
	struct lpfc_rqe *temp_hrqe;
	struct lpfc_rqe *temp_drqe;
	struct lpfc_register doorbell;
	int put_index = hq->host_index;

	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return -ENOMEM;
	temp_hrqe = hq->qe[hq->host_index].rqe;
	temp_drqe = dq->qe[dq->host_index].rqe;

	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
		return -EINVAL;
	if (hq->host_index != dq->host_index)
		return -EINVAL;
	/* If the host has not yet processed the next entry then we are done */
	if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index)
		return -EBUSY;
	lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
	lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

	/* Update the host index to point to the next slot */
	hq->host_index = ((hq->host_index + 1) % hq->entry_count);
	dq->host_index = ((dq->host_index + 1) % dq->entry_count);

	/* Ring The Header Receive Queue Doorbell */
	if (!(hq->host_index % hq->entry_repost)) {
		doorbell.word0 = 0;
		bf_set(lpfc_rq_doorbell_num_posted, &doorbell,
		       hq->entry_repost);
		bf_set(lpfc_rq_doorbell_id, &doorbell, hq->queue_id);
		writel(doorbell.word0, hq->phba->sli4_hba.RQDBregaddr);
	}
	return put_index;
}

/**
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * one Receive Queue Entry by the HBA.
When the HBA indicates that it has 444 * consumed an entry the host calls this function to update the queue's 445 * internal pointers. This routine returns the number of entries that were 446 * consumed by the HBA. 447 **/ 448 static uint32_t 449 lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq) 450 { 451 /* sanity check on queue memory */ 452 if (unlikely(!hq) || unlikely(!dq)) 453 return 0; 454 455 if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ)) 456 return 0; 457 hq->hba_index = ((hq->hba_index + 1) % hq->entry_count); 458 dq->hba_index = ((dq->hba_index + 1) % dq->entry_count); 459 return 1; 460 } 461 462 /** 463 * lpfc_cmd_iocb - Get next command iocb entry in the ring 464 * @phba: Pointer to HBA context object. 465 * @pring: Pointer to driver SLI ring object. 466 * 467 * This function returns pointer to next command iocb entry 468 * in the command ring. The caller must hold hbalock to prevent 469 * other threads consume the next command iocb. 470 * SLI-2/SLI-3 provide different sized iocbs. 471 **/ 472 static inline IOCB_t * 473 lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 474 { 475 return (IOCB_t *) (((char *) pring->cmdringaddr) + 476 pring->cmdidx * phba->iocb_cmd_size); 477 } 478 479 /** 480 * lpfc_resp_iocb - Get next response iocb entry in the ring 481 * @phba: Pointer to HBA context object. 482 * @pring: Pointer to driver SLI ring object. 483 * 484 * This function returns pointer to next response iocb entry 485 * in the response ring. The caller must hold hbalock to make sure 486 * that no other thread consume the next response iocb. 487 * SLI-2/SLI-3 provide different sized iocbs. 488 **/ 489 static inline IOCB_t * 490 lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 491 { 492 return (IOCB_t *) (((char *) pring->rspringaddr) + 493 pring->rspidx * phba->iocb_rsp_size); 494 } 495 496 /** 497 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool 498 * @phba: Pointer to HBA context object. 499 * 500 * This function is called with hbalock held. This function 501 * allocates a new driver iocb object from the iocb pool. If the 502 * allocation is successful, it returns pointer to the newly 503 * allocated iocb object else it returns NULL. 504 **/ 505 struct lpfc_iocbq * 506 __lpfc_sli_get_iocbq(struct lpfc_hba *phba) 507 { 508 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list; 509 struct lpfc_iocbq * iocbq = NULL; 510 511 list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list); 512 if (iocbq) 513 phba->iocb_cnt++; 514 if (phba->iocb_cnt > phba->iocb_max) 515 phba->iocb_max = phba->iocb_cnt; 516 return iocbq; 517 } 518 519 /** 520 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI. 521 * @phba: Pointer to HBA context object. 522 * @xritag: XRI value. 523 * 524 * This function clears the sglq pointer from the array of acive 525 * sglq's. The xritag that is passed in is used to index into the 526 * array. Before the xritag can be used it needs to be adjusted 527 * by subtracting the xribase. 528 * 529 * Returns sglq ponter = success, NULL = Failure. 530 **/ 531 static struct lpfc_sglq * 532 __lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag) 533 { 534 struct lpfc_sglq *sglq; 535 536 sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag]; 537 phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL; 538 return sglq; 539 } 540 541 /** 542 * __lpfc_get_active_sglq - Get the active sglq for this XRI. 543 * @phba: Pointer to HBA context object. 544 * @xritag: XRI value. 
545 * 546 * This function returns the sglq pointer from the array of acive 547 * sglq's. The xritag that is passed in is used to index into the 548 * array. Before the xritag can be used it needs to be adjusted 549 * by subtracting the xribase. 550 * 551 * Returns sglq ponter = success, NULL = Failure. 552 **/ 553 struct lpfc_sglq * 554 __lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag) 555 { 556 struct lpfc_sglq *sglq; 557 558 sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag]; 559 return sglq; 560 } 561 562 /** 563 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap. 564 * @phba: Pointer to HBA context object. 565 * @xritag: xri used in this exchange. 566 * @rrq: The RRQ to be cleared. 567 * 568 **/ 569 void 570 lpfc_clr_rrq_active(struct lpfc_hba *phba, 571 uint16_t xritag, 572 struct lpfc_node_rrq *rrq) 573 { 574 struct lpfc_nodelist *ndlp = NULL; 575 576 if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp)) 577 ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID); 578 579 /* The target DID could have been swapped (cable swap) 580 * we should use the ndlp from the findnode if it is 581 * available. 582 */ 583 if ((!ndlp) && rrq->ndlp) 584 ndlp = rrq->ndlp; 585 586 if (!ndlp) 587 goto out; 588 589 if (test_and_clear_bit(xritag, ndlp->active_rrqs.xri_bitmap)) { 590 rrq->send_rrq = 0; 591 rrq->xritag = 0; 592 rrq->rrq_stop_time = 0; 593 } 594 out: 595 mempool_free(rrq, phba->rrq_pool); 596 } 597 598 /** 599 * lpfc_handle_rrq_active - Checks if RRQ has waithed RATOV. 600 * @phba: Pointer to HBA context object. 601 * 602 * This function is called with hbalock held. This function 603 * Checks if stop_time (ratov from setting rrq active) has 604 * been reached, if it has and the send_rrq flag is set then 605 * it will call lpfc_send_rrq. If the send_rrq flag is not set 606 * then it will just call the routine to clear the rrq and 607 * free the rrq resource. 608 * The timer is set to the next rrq that is going to expire before 609 * leaving the routine. 610 * 611 **/ 612 void 613 lpfc_handle_rrq_active(struct lpfc_hba *phba) 614 { 615 struct lpfc_node_rrq *rrq; 616 struct lpfc_node_rrq *nextrrq; 617 unsigned long next_time; 618 unsigned long iflags; 619 LIST_HEAD(send_rrq); 620 621 spin_lock_irqsave(&phba->hbalock, iflags); 622 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 623 next_time = jiffies + HZ * (phba->fc_ratov + 1); 624 list_for_each_entry_safe(rrq, nextrrq, 625 &phba->active_rrq_list, list) { 626 if (time_after(jiffies, rrq->rrq_stop_time)) 627 list_move(&rrq->list, &send_rrq); 628 else if (time_before(rrq->rrq_stop_time, next_time)) 629 next_time = rrq->rrq_stop_time; 630 } 631 spin_unlock_irqrestore(&phba->hbalock, iflags); 632 if (!list_empty(&phba->active_rrq_list)) 633 mod_timer(&phba->rrq_tmr, next_time); 634 list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) { 635 list_del(&rrq->list); 636 if (!rrq->send_rrq) 637 /* this call will free the rrq */ 638 lpfc_clr_rrq_active(phba, rrq->xritag, rrq); 639 else if (lpfc_send_rrq(phba, rrq)) { 640 /* if we send the rrq then the completion handler 641 * will clear the bit in the xribitmap. 642 */ 643 lpfc_clr_rrq_active(phba, rrq->xritag, 644 rrq); 645 } 646 } 647 } 648 649 /** 650 * lpfc_get_active_rrq - Get the active RRQ for this exchange. 651 * @vport: Pointer to vport context object. 652 * @xri: The xri used in the exchange. 653 * @did: The targets DID for this exchange. 654 * 655 * returns NULL = rrq not found in the phba->active_rrq_list. 656 * rrq = rrq for this xri and target. 
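 *
 * Illustrative sketch (not in the driver as written): a caller that wants to
 * retire the RRQ for a completed exchange could do
 *
 *	rrq = lpfc_get_active_rrq(vport, xri, did);
 *	if (rrq)
 *		lpfc_clr_rrq_active(vport->phba, rrq->xritag, rrq);
 *
 * lpfc_clr_rrq_active() frees the rrq back to phba->rrq_pool.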
657 **/ 658 struct lpfc_node_rrq * 659 lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did) 660 { 661 struct lpfc_hba *phba = vport->phba; 662 struct lpfc_node_rrq *rrq; 663 struct lpfc_node_rrq *nextrrq; 664 unsigned long iflags; 665 666 if (phba->sli_rev != LPFC_SLI_REV4) 667 return NULL; 668 spin_lock_irqsave(&phba->hbalock, iflags); 669 list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) { 670 if (rrq->vport == vport && rrq->xritag == xri && 671 rrq->nlp_DID == did){ 672 list_del(&rrq->list); 673 spin_unlock_irqrestore(&phba->hbalock, iflags); 674 return rrq; 675 } 676 } 677 spin_unlock_irqrestore(&phba->hbalock, iflags); 678 return NULL; 679 } 680 681 /** 682 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport. 683 * @vport: Pointer to vport context object. 684 * @ndlp: Pointer to the lpfc_node_list structure. 685 * If ndlp is NULL Remove all active RRQs for this vport from the 686 * phba->active_rrq_list and clear the rrq. 687 * If ndlp is not NULL then only remove rrqs for this vport & this ndlp. 688 **/ 689 void 690 lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 691 692 { 693 struct lpfc_hba *phba = vport->phba; 694 struct lpfc_node_rrq *rrq; 695 struct lpfc_node_rrq *nextrrq; 696 unsigned long iflags; 697 LIST_HEAD(rrq_list); 698 699 if (phba->sli_rev != LPFC_SLI_REV4) 700 return; 701 if (!ndlp) { 702 lpfc_sli4_vport_delete_els_xri_aborted(vport); 703 lpfc_sli4_vport_delete_fcp_xri_aborted(vport); 704 } 705 spin_lock_irqsave(&phba->hbalock, iflags); 706 list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) 707 if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp)) 708 list_move(&rrq->list, &rrq_list); 709 spin_unlock_irqrestore(&phba->hbalock, iflags); 710 711 list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) { 712 list_del(&rrq->list); 713 lpfc_clr_rrq_active(phba, rrq->xritag, rrq); 714 } 715 } 716 717 /** 718 * lpfc_cleanup_wt_rrqs - Remove all rrq's from the active list. 719 * @phba: Pointer to HBA context object. 720 * 721 * Remove all rrqs from the phba->active_rrq_list and free them by 722 * calling __lpfc_clr_active_rrq 723 * 724 **/ 725 void 726 lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba) 727 { 728 struct lpfc_node_rrq *rrq; 729 struct lpfc_node_rrq *nextrrq; 730 unsigned long next_time; 731 unsigned long iflags; 732 LIST_HEAD(rrq_list); 733 734 if (phba->sli_rev != LPFC_SLI_REV4) 735 return; 736 spin_lock_irqsave(&phba->hbalock, iflags); 737 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 738 next_time = jiffies + HZ * (phba->fc_ratov * 2); 739 list_splice_init(&phba->active_rrq_list, &rrq_list); 740 spin_unlock_irqrestore(&phba->hbalock, iflags); 741 742 list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) { 743 list_del(&rrq->list); 744 lpfc_clr_rrq_active(phba, rrq->xritag, rrq); 745 } 746 if (!list_empty(&phba->active_rrq_list)) 747 mod_timer(&phba->rrq_tmr, next_time); 748 } 749 750 751 /** 752 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap. 753 * @phba: Pointer to HBA context object. 754 * @ndlp: Targets nodelist pointer for this exchange. 755 * @xritag the xri in the bitmap to test. 756 * 757 * This function is called with hbalock held. This function 758 * returns 0 = rrq not active for this xri 759 * 1 = rrq is valid for this xri. 
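 *
 * Illustrative sketch (mirrors the sglq allocation path below): before
 * reusing an XRI the driver checks that it is not quarantined by an RRQ, e.g.
 *
 *	if (lpfc_test_rrq_active(phba, ndlp, sglq->sli4_lxritag))
 *		(skip this sglq and try another one)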
760 **/ 761 int 762 lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, 763 uint16_t xritag) 764 { 765 if (!ndlp) 766 return 0; 767 if (test_bit(xritag, ndlp->active_rrqs.xri_bitmap)) 768 return 1; 769 else 770 return 0; 771 } 772 773 /** 774 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap. 775 * @phba: Pointer to HBA context object. 776 * @ndlp: nodelist pointer for this target. 777 * @xritag: xri used in this exchange. 778 * @rxid: Remote Exchange ID. 779 * @send_rrq: Flag used to determine if we should send rrq els cmd. 780 * 781 * This function takes the hbalock. 782 * The active bit is always set in the active rrq xri_bitmap even 783 * if there is no slot avaiable for the other rrq information. 784 * 785 * returns 0 rrq actived for this xri 786 * < 0 No memory or invalid ndlp. 787 **/ 788 int 789 lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, 790 uint16_t xritag, uint16_t rxid, uint16_t send_rrq) 791 { 792 unsigned long iflags; 793 struct lpfc_node_rrq *rrq; 794 int empty; 795 796 if (!ndlp) 797 return -EINVAL; 798 799 if (!phba->cfg_enable_rrq) 800 return -EINVAL; 801 802 spin_lock_irqsave(&phba->hbalock, iflags); 803 if (phba->pport->load_flag & FC_UNLOADING) { 804 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 805 goto out; 806 } 807 808 /* 809 * set the active bit even if there is no mem available. 810 */ 811 if (NLP_CHK_FREE_REQ(ndlp)) 812 goto out; 813 814 if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING)) 815 goto out; 816 817 if (test_and_set_bit(xritag, ndlp->active_rrqs.xri_bitmap)) 818 goto out; 819 820 spin_unlock_irqrestore(&phba->hbalock, iflags); 821 rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL); 822 if (!rrq) { 823 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 824 "3155 Unable to allocate RRQ xri:0x%x rxid:0x%x" 825 " DID:0x%x Send:%d\n", 826 xritag, rxid, ndlp->nlp_DID, send_rrq); 827 return -EINVAL; 828 } 829 rrq->send_rrq = send_rrq; 830 rrq->xritag = xritag; 831 rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1); 832 rrq->ndlp = ndlp; 833 rrq->nlp_DID = ndlp->nlp_DID; 834 rrq->vport = ndlp->vport; 835 rrq->rxid = rxid; 836 rrq->send_rrq = send_rrq; 837 spin_lock_irqsave(&phba->hbalock, iflags); 838 empty = list_empty(&phba->active_rrq_list); 839 list_add_tail(&rrq->list, &phba->active_rrq_list); 840 phba->hba_flag |= HBA_RRQ_ACTIVE; 841 if (empty) 842 lpfc_worker_wake_up(phba); 843 spin_unlock_irqrestore(&phba->hbalock, iflags); 844 return 0; 845 out: 846 spin_unlock_irqrestore(&phba->hbalock, iflags); 847 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 848 "2921 Can't set rrq active xri:0x%x rxid:0x%x" 849 " DID:0x%x Send:%d\n", 850 xritag, rxid, ndlp->nlp_DID, send_rrq); 851 return -EINVAL; 852 } 853 854 /** 855 * __lpfc_sli_get_sglq - Allocates an iocb object from sgl pool 856 * @phba: Pointer to HBA context object. 857 * @piocb: Pointer to the iocbq. 858 * 859 * This function is called with hbalock held. This function 860 * gets a new driver sglq object from the sglq list. If the 861 * list is not empty then it is successful, it returns pointer to the newly 862 * allocated sglq object else it returns NULL. 
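 *
 * Illustrative sketch (not part of this routine): a submit path would
 * typically bind the returned sglq to the iocb before building the WQE,
 * with any error handling left to the caller:
 *
 *	sglq = __lpfc_sli_get_sglq(phba, piocbq);
 *	if (sglq) {
 *		piocbq->sli4_lxritag = sglq->sli4_lxritag;
 *		piocbq->sli4_xritag = sglq->sli4_xritag;
 *	}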
863 **/ 864 static struct lpfc_sglq * 865 __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq) 866 { 867 struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list; 868 struct lpfc_sglq *sglq = NULL; 869 struct lpfc_sglq *start_sglq = NULL; 870 struct lpfc_scsi_buf *lpfc_cmd; 871 struct lpfc_nodelist *ndlp; 872 int found = 0; 873 874 if (piocbq->iocb_flag & LPFC_IO_FCP) { 875 lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1; 876 ndlp = lpfc_cmd->rdata->pnode; 877 } else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) && 878 !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) 879 ndlp = piocbq->context_un.ndlp; 880 else if ((piocbq->iocb.ulpCommand == CMD_ELS_REQUEST64_CR) && 881 (piocbq->iocb_flag & LPFC_IO_LIBDFC)) 882 ndlp = piocbq->context_un.ndlp; 883 else 884 ndlp = piocbq->context1; 885 886 list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list); 887 start_sglq = sglq; 888 while (!found) { 889 if (!sglq) 890 return NULL; 891 if (lpfc_test_rrq_active(phba, ndlp, sglq->sli4_lxritag)) { 892 /* This xri has an rrq outstanding for this DID. 893 * put it back in the list and get another xri. 894 */ 895 list_add_tail(&sglq->list, lpfc_sgl_list); 896 sglq = NULL; 897 list_remove_head(lpfc_sgl_list, sglq, 898 struct lpfc_sglq, list); 899 if (sglq == start_sglq) { 900 sglq = NULL; 901 break; 902 } else 903 continue; 904 } 905 sglq->ndlp = ndlp; 906 found = 1; 907 phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq; 908 sglq->state = SGL_ALLOCATED; 909 } 910 return sglq; 911 } 912 913 /** 914 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool 915 * @phba: Pointer to HBA context object. 916 * 917 * This function is called with no lock held. This function 918 * allocates a new driver iocb object from the iocb pool. If the 919 * allocation is successful, it returns pointer to the newly 920 * allocated iocb object else it returns NULL. 921 **/ 922 struct lpfc_iocbq * 923 lpfc_sli_get_iocbq(struct lpfc_hba *phba) 924 { 925 struct lpfc_iocbq * iocbq = NULL; 926 unsigned long iflags; 927 928 spin_lock_irqsave(&phba->hbalock, iflags); 929 iocbq = __lpfc_sli_get_iocbq(phba); 930 spin_unlock_irqrestore(&phba->hbalock, iflags); 931 return iocbq; 932 } 933 934 /** 935 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool 936 * @phba: Pointer to HBA context object. 937 * @iocbq: Pointer to driver iocb object. 938 * 939 * This function is called with hbalock held to release driver 940 * iocb object to the iocb pool. The iotag in the iocb object 941 * does not change for each use of the iocb object. This function 942 * clears all other fields of the iocb object when it is freed. 943 * The sqlq structure that holds the xritag and phys and virtual 944 * mappings for the scatter gather list is retrieved from the 945 * active array of sglq. The get of the sglq pointer also clears 946 * the entry in the array. If the status of the IO indiactes that 947 * this IO was aborted then the sglq entry it put on the 948 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the 949 * IO has good status or fails for any other reason then the sglq 950 * entry is added to the free list (lpfc_sgl_list). 
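 *
 * Illustrative note: callers normally release through the top-level wrapper,
 *
 *	lpfc_sli_release_iocbq(phba, iocbq);
 *
 * which takes the hbalock and reaches this routine via the
 * phba->__lpfc_sli_release_iocbq function pointer on SLI4 adapters.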
951 **/ 952 static void 953 __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 954 { 955 struct lpfc_sglq *sglq; 956 size_t start_clean = offsetof(struct lpfc_iocbq, iocb); 957 unsigned long iflag = 0; 958 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 959 960 if (iocbq->sli4_xritag == NO_XRI) 961 sglq = NULL; 962 else 963 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag); 964 965 if (sglq) { 966 if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) && 967 (sglq->state != SGL_XRI_ABORTED)) { 968 spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock, 969 iflag); 970 list_add(&sglq->list, 971 &phba->sli4_hba.lpfc_abts_els_sgl_list); 972 spin_unlock_irqrestore( 973 &phba->sli4_hba.abts_sgl_list_lock, iflag); 974 } else { 975 sglq->state = SGL_FREED; 976 sglq->ndlp = NULL; 977 list_add_tail(&sglq->list, 978 &phba->sli4_hba.lpfc_sgl_list); 979 980 /* Check if TXQ queue needs to be serviced */ 981 if (pring->txq_cnt) 982 lpfc_worker_wake_up(phba); 983 } 984 } 985 986 987 /* 988 * Clean all volatile data fields, preserve iotag and node struct. 989 */ 990 memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean); 991 iocbq->sli4_lxritag = NO_XRI; 992 iocbq->sli4_xritag = NO_XRI; 993 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); 994 } 995 996 997 /** 998 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool 999 * @phba: Pointer to HBA context object. 1000 * @iocbq: Pointer to driver iocb object. 1001 * 1002 * This function is called with hbalock held to release driver 1003 * iocb object to the iocb pool. The iotag in the iocb object 1004 * does not change for each use of the iocb object. This function 1005 * clears all other fields of the iocb object when it is freed. 1006 **/ 1007 static void 1008 __lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 1009 { 1010 size_t start_clean = offsetof(struct lpfc_iocbq, iocb); 1011 1012 /* 1013 * Clean all volatile data fields, preserve iotag and node struct. 1014 */ 1015 memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean); 1016 iocbq->sli4_xritag = NO_XRI; 1017 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); 1018 } 1019 1020 /** 1021 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool 1022 * @phba: Pointer to HBA context object. 1023 * @iocbq: Pointer to driver iocb object. 1024 * 1025 * This function is called with hbalock held to release driver 1026 * iocb object to the iocb pool. The iotag in the iocb object 1027 * does not change for each use of the iocb object. This function 1028 * clears all other fields of the iocb object when it is freed. 1029 **/ 1030 static void 1031 __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 1032 { 1033 phba->__lpfc_sli_release_iocbq(phba, iocbq); 1034 phba->iocb_cnt--; 1035 } 1036 1037 /** 1038 * lpfc_sli_release_iocbq - Release iocb to the iocb pool 1039 * @phba: Pointer to HBA context object. 1040 * @iocbq: Pointer to driver iocb object. 1041 * 1042 * This function is called with no lock held to release the iocb to 1043 * iocb pool. 1044 **/ 1045 void 1046 lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 1047 { 1048 unsigned long iflags; 1049 1050 /* 1051 * Clean all volatile data fields, preserve iotag and node struct. 1052 */ 1053 spin_lock_irqsave(&phba->hbalock, iflags); 1054 __lpfc_sli_release_iocbq(phba, iocbq); 1055 spin_unlock_irqrestore(&phba->hbalock, iflags); 1056 } 1057 1058 /** 1059 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list. 
1060 * @phba: Pointer to HBA context object. 1061 * @iocblist: List of IOCBs. 1062 * @ulpstatus: ULP status in IOCB command field. 1063 * @ulpWord4: ULP word-4 in IOCB command field. 1064 * 1065 * This function is called with a list of IOCBs to cancel. It cancels the IOCB 1066 * on the list by invoking the complete callback function associated with the 1067 * IOCB with the provided @ulpstatus and @ulpword4 set to the IOCB commond 1068 * fields. 1069 **/ 1070 void 1071 lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist, 1072 uint32_t ulpstatus, uint32_t ulpWord4) 1073 { 1074 struct lpfc_iocbq *piocb; 1075 1076 while (!list_empty(iocblist)) { 1077 list_remove_head(iocblist, piocb, struct lpfc_iocbq, list); 1078 1079 if (!piocb->iocb_cmpl) 1080 lpfc_sli_release_iocbq(phba, piocb); 1081 else { 1082 piocb->iocb.ulpStatus = ulpstatus; 1083 piocb->iocb.un.ulpWord[4] = ulpWord4; 1084 (piocb->iocb_cmpl) (phba, piocb, piocb); 1085 } 1086 } 1087 return; 1088 } 1089 1090 /** 1091 * lpfc_sli_iocb_cmd_type - Get the iocb type 1092 * @iocb_cmnd: iocb command code. 1093 * 1094 * This function is called by ring event handler function to get the iocb type. 1095 * This function translates the iocb command to an iocb command type used to 1096 * decide the final disposition of each completed IOCB. 1097 * The function returns 1098 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb 1099 * LPFC_SOL_IOCB if it is a solicited iocb completion 1100 * LPFC_ABORT_IOCB if it is an abort iocb 1101 * LPFC_UNSOL_IOCB if it is an unsolicited iocb 1102 * 1103 * The caller is not required to hold any lock. 1104 **/ 1105 static lpfc_iocb_type 1106 lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd) 1107 { 1108 lpfc_iocb_type type = LPFC_UNKNOWN_IOCB; 1109 1110 if (iocb_cmnd > CMD_MAX_IOCB_CMD) 1111 return 0; 1112 1113 switch (iocb_cmnd) { 1114 case CMD_XMIT_SEQUENCE_CR: 1115 case CMD_XMIT_SEQUENCE_CX: 1116 case CMD_XMIT_BCAST_CN: 1117 case CMD_XMIT_BCAST_CX: 1118 case CMD_ELS_REQUEST_CR: 1119 case CMD_ELS_REQUEST_CX: 1120 case CMD_CREATE_XRI_CR: 1121 case CMD_CREATE_XRI_CX: 1122 case CMD_GET_RPI_CN: 1123 case CMD_XMIT_ELS_RSP_CX: 1124 case CMD_GET_RPI_CR: 1125 case CMD_FCP_IWRITE_CR: 1126 case CMD_FCP_IWRITE_CX: 1127 case CMD_FCP_IREAD_CR: 1128 case CMD_FCP_IREAD_CX: 1129 case CMD_FCP_ICMND_CR: 1130 case CMD_FCP_ICMND_CX: 1131 case CMD_FCP_TSEND_CX: 1132 case CMD_FCP_TRSP_CX: 1133 case CMD_FCP_TRECEIVE_CX: 1134 case CMD_FCP_AUTO_TRSP_CX: 1135 case CMD_ADAPTER_MSG: 1136 case CMD_ADAPTER_DUMP: 1137 case CMD_XMIT_SEQUENCE64_CR: 1138 case CMD_XMIT_SEQUENCE64_CX: 1139 case CMD_XMIT_BCAST64_CN: 1140 case CMD_XMIT_BCAST64_CX: 1141 case CMD_ELS_REQUEST64_CR: 1142 case CMD_ELS_REQUEST64_CX: 1143 case CMD_FCP_IWRITE64_CR: 1144 case CMD_FCP_IWRITE64_CX: 1145 case CMD_FCP_IREAD64_CR: 1146 case CMD_FCP_IREAD64_CX: 1147 case CMD_FCP_ICMND64_CR: 1148 case CMD_FCP_ICMND64_CX: 1149 case CMD_FCP_TSEND64_CX: 1150 case CMD_FCP_TRSP64_CX: 1151 case CMD_FCP_TRECEIVE64_CX: 1152 case CMD_GEN_REQUEST64_CR: 1153 case CMD_GEN_REQUEST64_CX: 1154 case CMD_XMIT_ELS_RSP64_CX: 1155 case DSSCMD_IWRITE64_CR: 1156 case DSSCMD_IWRITE64_CX: 1157 case DSSCMD_IREAD64_CR: 1158 case DSSCMD_IREAD64_CX: 1159 type = LPFC_SOL_IOCB; 1160 break; 1161 case CMD_ABORT_XRI_CN: 1162 case CMD_ABORT_XRI_CX: 1163 case CMD_CLOSE_XRI_CN: 1164 case CMD_CLOSE_XRI_CX: 1165 case CMD_XRI_ABORTED_CX: 1166 case CMD_ABORT_MXRI64_CN: 1167 case CMD_XMIT_BLS_RSP64_CX: 1168 type = LPFC_ABORT_IOCB; 1169 break; 1170 case CMD_RCV_SEQUENCE_CX: 1171 case CMD_RCV_ELS_REQ_CX: 1172 case 
CMD_RCV_SEQUENCE64_CX: 1173 case CMD_RCV_ELS_REQ64_CX: 1174 case CMD_ASYNC_STATUS: 1175 case CMD_IOCB_RCV_SEQ64_CX: 1176 case CMD_IOCB_RCV_ELS64_CX: 1177 case CMD_IOCB_RCV_CONT64_CX: 1178 case CMD_IOCB_RET_XRI64_CX: 1179 type = LPFC_UNSOL_IOCB; 1180 break; 1181 case CMD_IOCB_XMIT_MSEQ64_CR: 1182 case CMD_IOCB_XMIT_MSEQ64_CX: 1183 case CMD_IOCB_RCV_SEQ_LIST64_CX: 1184 case CMD_IOCB_RCV_ELS_LIST64_CX: 1185 case CMD_IOCB_CLOSE_EXTENDED_CN: 1186 case CMD_IOCB_ABORT_EXTENDED_CN: 1187 case CMD_IOCB_RET_HBQE64_CN: 1188 case CMD_IOCB_FCP_IBIDIR64_CR: 1189 case CMD_IOCB_FCP_IBIDIR64_CX: 1190 case CMD_IOCB_FCP_ITASKMGT64_CX: 1191 case CMD_IOCB_LOGENTRY_CN: 1192 case CMD_IOCB_LOGENTRY_ASYNC_CN: 1193 printk("%s - Unhandled SLI-3 Command x%x\n", 1194 __func__, iocb_cmnd); 1195 type = LPFC_UNKNOWN_IOCB; 1196 break; 1197 default: 1198 type = LPFC_UNKNOWN_IOCB; 1199 break; 1200 } 1201 1202 return type; 1203 } 1204 1205 /** 1206 * lpfc_sli_ring_map - Issue config_ring mbox for all rings 1207 * @phba: Pointer to HBA context object. 1208 * 1209 * This function is called from SLI initialization code 1210 * to configure every ring of the HBA's SLI interface. The 1211 * caller is not required to hold any lock. This function issues 1212 * a config_ring mailbox command for each ring. 1213 * This function returns zero if successful else returns a negative 1214 * error code. 1215 **/ 1216 static int 1217 lpfc_sli_ring_map(struct lpfc_hba *phba) 1218 { 1219 struct lpfc_sli *psli = &phba->sli; 1220 LPFC_MBOXQ_t *pmb; 1221 MAILBOX_t *pmbox; 1222 int i, rc, ret = 0; 1223 1224 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1225 if (!pmb) 1226 return -ENOMEM; 1227 pmbox = &pmb->u.mb; 1228 phba->link_state = LPFC_INIT_MBX_CMDS; 1229 for (i = 0; i < psli->num_rings; i++) { 1230 lpfc_config_ring(phba, i, pmb); 1231 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 1232 if (rc != MBX_SUCCESS) { 1233 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1234 "0446 Adapter failed to init (%d), " 1235 "mbxCmd x%x CFG_RING, mbxStatus x%x, " 1236 "ring %d\n", 1237 rc, pmbox->mbxCommand, 1238 pmbox->mbxStatus, i); 1239 phba->link_state = LPFC_HBA_ERROR; 1240 ret = -ENXIO; 1241 break; 1242 } 1243 } 1244 mempool_free(pmb, phba->mbox_mem_pool); 1245 return ret; 1246 } 1247 1248 /** 1249 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq 1250 * @phba: Pointer to HBA context object. 1251 * @pring: Pointer to driver SLI ring object. 1252 * @piocb: Pointer to the driver iocb object. 1253 * 1254 * This function is called with hbalock held. The function adds the 1255 * new iocb to txcmplq of the given ring. This function always returns 1256 * 0. If this function is called for ELS ring, this function checks if 1257 * there is a vport associated with the ELS command. This function also 1258 * starts els_tmofunc timer if this is an ELS command. 
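 *
 * Illustrative sketch: the submit path below pairs this call with releasing
 * iocbs that have no completion handler, roughly
 *
 *	if (nextiocb->iocb_cmpl)
 *		lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
 *	else
 *		__lpfc_sli_release_iocbq(phba, nextiocb);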
1259 **/ 1260 static int 1261 lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 1262 struct lpfc_iocbq *piocb) 1263 { 1264 list_add_tail(&piocb->list, &pring->txcmplq); 1265 piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ; 1266 pring->txcmplq_cnt++; 1267 if (pring->txcmplq_cnt > pring->txcmplq_max) 1268 pring->txcmplq_max = pring->txcmplq_cnt; 1269 1270 if ((unlikely(pring->ringno == LPFC_ELS_RING)) && 1271 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && 1272 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) { 1273 if (!piocb->vport) 1274 BUG(); 1275 else 1276 mod_timer(&piocb->vport->els_tmofunc, 1277 jiffies + HZ * (phba->fc_ratov << 1)); 1278 } 1279 1280 1281 return 0; 1282 } 1283 1284 /** 1285 * lpfc_sli_ringtx_get - Get first element of the txq 1286 * @phba: Pointer to HBA context object. 1287 * @pring: Pointer to driver SLI ring object. 1288 * 1289 * This function is called with hbalock held to get next 1290 * iocb in txq of the given ring. If there is any iocb in 1291 * the txq, the function returns first iocb in the list after 1292 * removing the iocb from the list, else it returns NULL. 1293 **/ 1294 struct lpfc_iocbq * 1295 lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1296 { 1297 struct lpfc_iocbq *cmd_iocb; 1298 1299 list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list); 1300 if (cmd_iocb != NULL) 1301 pring->txq_cnt--; 1302 return cmd_iocb; 1303 } 1304 1305 /** 1306 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring 1307 * @phba: Pointer to HBA context object. 1308 * @pring: Pointer to driver SLI ring object. 1309 * 1310 * This function is called with hbalock held and the caller must post the 1311 * iocb without releasing the lock. If the caller releases the lock, 1312 * iocb slot returned by the function is not guaranteed to be available. 1313 * The function returns pointer to the next available iocb slot if there 1314 * is available slot in the ring, else it returns NULL. 1315 * If the get index of the ring is ahead of the put index, the function 1316 * will post an error attention event to the worker thread to take the 1317 * HBA to offline state. 1318 **/ 1319 static IOCB_t * 1320 lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1321 { 1322 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; 1323 uint32_t max_cmd_idx = pring->numCiocb; 1324 if ((pring->next_cmdidx == pring->cmdidx) && 1325 (++pring->next_cmdidx >= max_cmd_idx)) 1326 pring->next_cmdidx = 0; 1327 1328 if (unlikely(pring->local_getidx == pring->next_cmdidx)) { 1329 1330 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx); 1331 1332 if (unlikely(pring->local_getidx >= max_cmd_idx)) { 1333 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 1334 "0315 Ring %d issue: portCmdGet %d " 1335 "is bigger than cmd ring %d\n", 1336 pring->ringno, 1337 pring->local_getidx, max_cmd_idx); 1338 1339 phba->link_state = LPFC_HBA_ERROR; 1340 /* 1341 * All error attention handlers are posted to 1342 * worker thread 1343 */ 1344 phba->work_ha |= HA_ERATT; 1345 phba->work_hs = HS_FFER3; 1346 1347 lpfc_worker_wake_up(phba); 1348 1349 return NULL; 1350 } 1351 1352 if (pring->local_getidx == pring->next_cmdidx) 1353 return NULL; 1354 } 1355 1356 return lpfc_cmd_iocb(phba, pring); 1357 } 1358 1359 /** 1360 * lpfc_sli_next_iotag - Get an iotag for the iocb 1361 * @phba: Pointer to HBA context object. 1362 * @iocbq: Pointer to driver iocb object. 1363 * 1364 * This function gets an iotag for the iocb. 
If there is no unused iotag and 1365 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup 1366 * array and assigns a new iotag. 1367 * The function returns the allocated iotag if successful, else returns zero. 1368 * Zero is not a valid iotag. 1369 * The caller is not required to hold any lock. 1370 **/ 1371 uint16_t 1372 lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 1373 { 1374 struct lpfc_iocbq **new_arr; 1375 struct lpfc_iocbq **old_arr; 1376 size_t new_len; 1377 struct lpfc_sli *psli = &phba->sli; 1378 uint16_t iotag; 1379 1380 spin_lock_irq(&phba->hbalock); 1381 iotag = psli->last_iotag; 1382 if(++iotag < psli->iocbq_lookup_len) { 1383 psli->last_iotag = iotag; 1384 psli->iocbq_lookup[iotag] = iocbq; 1385 spin_unlock_irq(&phba->hbalock); 1386 iocbq->iotag = iotag; 1387 return iotag; 1388 } else if (psli->iocbq_lookup_len < (0xffff 1389 - LPFC_IOCBQ_LOOKUP_INCREMENT)) { 1390 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT; 1391 spin_unlock_irq(&phba->hbalock); 1392 new_arr = kzalloc(new_len * sizeof (struct lpfc_iocbq *), 1393 GFP_KERNEL); 1394 if (new_arr) { 1395 spin_lock_irq(&phba->hbalock); 1396 old_arr = psli->iocbq_lookup; 1397 if (new_len <= psli->iocbq_lookup_len) { 1398 /* highly unprobable case */ 1399 kfree(new_arr); 1400 iotag = psli->last_iotag; 1401 if(++iotag < psli->iocbq_lookup_len) { 1402 psli->last_iotag = iotag; 1403 psli->iocbq_lookup[iotag] = iocbq; 1404 spin_unlock_irq(&phba->hbalock); 1405 iocbq->iotag = iotag; 1406 return iotag; 1407 } 1408 spin_unlock_irq(&phba->hbalock); 1409 return 0; 1410 } 1411 if (psli->iocbq_lookup) 1412 memcpy(new_arr, old_arr, 1413 ((psli->last_iotag + 1) * 1414 sizeof (struct lpfc_iocbq *))); 1415 psli->iocbq_lookup = new_arr; 1416 psli->iocbq_lookup_len = new_len; 1417 psli->last_iotag = iotag; 1418 psli->iocbq_lookup[iotag] = iocbq; 1419 spin_unlock_irq(&phba->hbalock); 1420 iocbq->iotag = iotag; 1421 kfree(old_arr); 1422 return iotag; 1423 } 1424 } else 1425 spin_unlock_irq(&phba->hbalock); 1426 1427 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 1428 "0318 Failed to allocate IOTAG.last IOTAG is %d\n", 1429 psli->last_iotag); 1430 1431 return 0; 1432 } 1433 1434 /** 1435 * lpfc_sli_submit_iocb - Submit an iocb to the firmware 1436 * @phba: Pointer to HBA context object. 1437 * @pring: Pointer to driver SLI ring object. 1438 * @iocb: Pointer to iocb slot in the ring. 1439 * @nextiocb: Pointer to driver iocb object which need to be 1440 * posted to firmware. 1441 * 1442 * This function is called with hbalock held to post a new iocb to 1443 * the firmware. This function copies the new iocb to ring iocb slot and 1444 * updates the ring pointers. It adds the new iocb to txcmplq if there is 1445 * a completion call back for this iocb else the function will free the 1446 * iocb object. 1447 **/ 1448 static void 1449 lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 1450 IOCB_t *iocb, struct lpfc_iocbq *nextiocb) 1451 { 1452 /* 1453 * Set up an iotag 1454 */ 1455 nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? 
nextiocb->iotag : 0; 1456 1457 1458 if (pring->ringno == LPFC_ELS_RING) { 1459 lpfc_debugfs_slow_ring_trc(phba, 1460 "IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x", 1461 *(((uint32_t *) &nextiocb->iocb) + 4), 1462 *(((uint32_t *) &nextiocb->iocb) + 6), 1463 *(((uint32_t *) &nextiocb->iocb) + 7)); 1464 } 1465 1466 /* 1467 * Issue iocb command to adapter 1468 */ 1469 lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size); 1470 wmb(); 1471 pring->stats.iocb_cmd++; 1472 1473 /* 1474 * If there is no completion routine to call, we can release the 1475 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF, 1476 * that have no rsp ring completion, iocb_cmpl MUST be NULL. 1477 */ 1478 if (nextiocb->iocb_cmpl) 1479 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb); 1480 else 1481 __lpfc_sli_release_iocbq(phba, nextiocb); 1482 1483 /* 1484 * Let the HBA know what IOCB slot will be the next one the 1485 * driver will put a command into. 1486 */ 1487 pring->cmdidx = pring->next_cmdidx; 1488 writel(pring->cmdidx, &phba->host_gp[pring->ringno].cmdPutInx); 1489 } 1490 1491 /** 1492 * lpfc_sli_update_full_ring - Update the chip attention register 1493 * @phba: Pointer to HBA context object. 1494 * @pring: Pointer to driver SLI ring object. 1495 * 1496 * The caller is not required to hold any lock for calling this function. 1497 * This function updates the chip attention bits for the ring to inform firmware 1498 * that there are pending work to be done for this ring and requests an 1499 * interrupt when there is space available in the ring. This function is 1500 * called when the driver is unable to post more iocbs to the ring due 1501 * to unavailability of space in the ring. 1502 **/ 1503 static void 1504 lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1505 { 1506 int ringno = pring->ringno; 1507 1508 pring->flag |= LPFC_CALL_RING_AVAILABLE; 1509 1510 wmb(); 1511 1512 /* 1513 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register. 1514 * The HBA will tell us when an IOCB entry is available. 1515 */ 1516 writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr); 1517 readl(phba->CAregaddr); /* flush */ 1518 1519 pring->stats.iocb_cmd_full++; 1520 } 1521 1522 /** 1523 * lpfc_sli_update_ring - Update chip attention register 1524 * @phba: Pointer to HBA context object. 1525 * @pring: Pointer to driver SLI ring object. 1526 * 1527 * This function updates the chip attention register bit for the 1528 * given ring to inform HBA that there is more work to be done 1529 * in this ring. The caller is not required to hold any lock. 1530 **/ 1531 static void 1532 lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1533 { 1534 int ringno = pring->ringno; 1535 1536 /* 1537 * Tell the HBA that there is work to do in this ring. 1538 */ 1539 if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) { 1540 wmb(); 1541 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr); 1542 readl(phba->CAregaddr); /* flush */ 1543 } 1544 } 1545 1546 /** 1547 * lpfc_sli_resume_iocb - Process iocbs in the txq 1548 * @phba: Pointer to HBA context object. 1549 * @pring: Pointer to driver SLI ring object. 1550 * 1551 * This function is called with hbalock held to post pending iocbs 1552 * in the txq to the firmware. This function is called when driver 1553 * detects space available in the ring. 
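 *
 * Illustrative sketch of the core of the routine: while there are both free
 * ring slots and queued iocbs, keep submitting, then update the chip
 * attention register,
 *
 *	while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
 *	       (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
 *		lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);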
1554 **/ 1555 static void 1556 lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1557 { 1558 IOCB_t *iocb; 1559 struct lpfc_iocbq *nextiocb; 1560 1561 /* 1562 * Check to see if: 1563 * (a) there is anything on the txq to send 1564 * (b) link is up 1565 * (c) link attention events can be processed (fcp ring only) 1566 * (d) IOCB processing is not blocked by the outstanding mbox command. 1567 */ 1568 if (pring->txq_cnt && 1569 lpfc_is_link_up(phba) && 1570 (pring->ringno != phba->sli.fcp_ring || 1571 phba->sli.sli_flag & LPFC_PROCESS_LA)) { 1572 1573 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && 1574 (nextiocb = lpfc_sli_ringtx_get(phba, pring))) 1575 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb); 1576 1577 if (iocb) 1578 lpfc_sli_update_ring(phba, pring); 1579 else 1580 lpfc_sli_update_full_ring(phba, pring); 1581 } 1582 1583 return; 1584 } 1585 1586 /** 1587 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ 1588 * @phba: Pointer to HBA context object. 1589 * @hbqno: HBQ number. 1590 * 1591 * This function is called with hbalock held to get the next 1592 * available slot for the given HBQ. If there is free slot 1593 * available for the HBQ it will return pointer to the next available 1594 * HBQ entry else it will return NULL. 1595 **/ 1596 static struct lpfc_hbq_entry * 1597 lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno) 1598 { 1599 struct hbq_s *hbqp = &phba->hbqs[hbqno]; 1600 1601 if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx && 1602 ++hbqp->next_hbqPutIdx >= hbqp->entry_count) 1603 hbqp->next_hbqPutIdx = 0; 1604 1605 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) { 1606 uint32_t raw_index = phba->hbq_get[hbqno]; 1607 uint32_t getidx = le32_to_cpu(raw_index); 1608 1609 hbqp->local_hbqGetIdx = getidx; 1610 1611 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) { 1612 lpfc_printf_log(phba, KERN_ERR, 1613 LOG_SLI | LOG_VPORT, 1614 "1802 HBQ %d: local_hbqGetIdx " 1615 "%u is > than hbqp->entry_count %u\n", 1616 hbqno, hbqp->local_hbqGetIdx, 1617 hbqp->entry_count); 1618 1619 phba->link_state = LPFC_HBA_ERROR; 1620 return NULL; 1621 } 1622 1623 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx) 1624 return NULL; 1625 } 1626 1627 return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt + 1628 hbqp->hbqPutIdx; 1629 } 1630 1631 /** 1632 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers 1633 * @phba: Pointer to HBA context object. 1634 * 1635 * This function is called with no lock held to free all the 1636 * hbq buffers while uninitializing the SLI interface. It also 1637 * frees the HBQ buffers returned by the firmware but not yet 1638 * processed by the upper layers. 
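 *
 * Illustrative note: teardown paths simply call
 *
 *	lpfc_sli_hbqbuf_free_all(phba);
 *
 * with no lock held; the routine takes the hbalock itself, walks every
 * hbq_buffer_list and the rb_pend_list, and marks the HBQs not in use.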
1639 **/ 1640 void 1641 lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba) 1642 { 1643 struct lpfc_dmabuf *dmabuf, *next_dmabuf; 1644 struct hbq_dmabuf *hbq_buf; 1645 unsigned long flags; 1646 int i, hbq_count; 1647 uint32_t hbqno; 1648 1649 hbq_count = lpfc_sli_hbq_count(); 1650 /* Return all memory used by all HBQs */ 1651 spin_lock_irqsave(&phba->hbalock, flags); 1652 for (i = 0; i < hbq_count; ++i) { 1653 list_for_each_entry_safe(dmabuf, next_dmabuf, 1654 &phba->hbqs[i].hbq_buffer_list, list) { 1655 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf); 1656 list_del(&hbq_buf->dbuf.list); 1657 (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf); 1658 } 1659 phba->hbqs[i].buffer_count = 0; 1660 } 1661 /* Return all HBQ buffer that are in-fly */ 1662 list_for_each_entry_safe(dmabuf, next_dmabuf, &phba->rb_pend_list, 1663 list) { 1664 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf); 1665 list_del(&hbq_buf->dbuf.list); 1666 if (hbq_buf->tag == -1) { 1667 (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer) 1668 (phba, hbq_buf); 1669 } else { 1670 hbqno = hbq_buf->tag >> 16; 1671 if (hbqno >= LPFC_MAX_HBQS) 1672 (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer) 1673 (phba, hbq_buf); 1674 else 1675 (phba->hbqs[hbqno].hbq_free_buffer)(phba, 1676 hbq_buf); 1677 } 1678 } 1679 1680 /* Mark the HBQs not in use */ 1681 phba->hbq_in_use = 0; 1682 spin_unlock_irqrestore(&phba->hbalock, flags); 1683 } 1684 1685 /** 1686 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware 1687 * @phba: Pointer to HBA context object. 1688 * @hbqno: HBQ number. 1689 * @hbq_buf: Pointer to HBQ buffer. 1690 * 1691 * This function is called with the hbalock held to post a 1692 * hbq buffer to the firmware. If the function finds an empty 1693 * slot in the HBQ, it will post the buffer. The function will return 1694 * pointer to the hbq entry if it successfully post the buffer 1695 * else it will return NULL. 1696 **/ 1697 static int 1698 lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno, 1699 struct hbq_dmabuf *hbq_buf) 1700 { 1701 return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf); 1702 } 1703 1704 /** 1705 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware 1706 * @phba: Pointer to HBA context object. 1707 * @hbqno: HBQ number. 1708 * @hbq_buf: Pointer to HBQ buffer. 1709 * 1710 * This function is called with the hbalock held to post a hbq buffer to the 1711 * firmware. If the function finds an empty slot in the HBQ, it will post the 1712 * buffer and place it on the hbq_buffer_list. The function will return zero if 1713 * it successfully post the buffer else it will return an error. 
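 *
 * Illustrative sketch: buffers are posted through the generic
 * lpfc_sli_hbq_to_firmware() wrapper, which dispatches here on SLI3
 * adapters, e.g.
 *
 *	if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
 *		phba->hbqs[hbqno].buffer_count++;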
1714 **/ 1715 static int 1716 lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno, 1717 struct hbq_dmabuf *hbq_buf) 1718 { 1719 struct lpfc_hbq_entry *hbqe; 1720 dma_addr_t physaddr = hbq_buf->dbuf.phys; 1721 1722 /* Get next HBQ entry slot to use */ 1723 hbqe = lpfc_sli_next_hbq_slot(phba, hbqno); 1724 if (hbqe) { 1725 struct hbq_s *hbqp = &phba->hbqs[hbqno]; 1726 1727 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr)); 1728 hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr)); 1729 hbqe->bde.tus.f.bdeSize = hbq_buf->size; 1730 hbqe->bde.tus.f.bdeFlags = 0; 1731 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w); 1732 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag); 1733 /* Sync SLIM */ 1734 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx; 1735 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno); 1736 /* flush */ 1737 readl(phba->hbq_put + hbqno); 1738 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list); 1739 return 0; 1740 } else 1741 return -ENOMEM; 1742 } 1743 1744 /** 1745 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware 1746 * @phba: Pointer to HBA context object. 1747 * @hbqno: HBQ number. 1748 * @hbq_buf: Pointer to HBQ buffer. 1749 * 1750 * This function is called with the hbalock held to post an RQE to the SLI4 1751 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to 1752 * the hbq_buffer_list and return zero, otherwise it will return an error. 1753 **/ 1754 static int 1755 lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno, 1756 struct hbq_dmabuf *hbq_buf) 1757 { 1758 int rc; 1759 struct lpfc_rqe hrqe; 1760 struct lpfc_rqe drqe; 1761 1762 hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys); 1763 hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys); 1764 drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys); 1765 drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys); 1766 rc = lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, 1767 &hrqe, &drqe); 1768 if (rc < 0) 1769 return rc; 1770 hbq_buf->tag = rc; 1771 list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list); 1772 return 0; 1773 } 1774 1775 /* HBQ for ELS and CT traffic. */ 1776 static struct lpfc_hbq_init lpfc_els_hbq = { 1777 .rn = 1, 1778 .entry_count = 256, 1779 .mask_count = 0, 1780 .profile = 0, 1781 .ring_mask = (1 << LPFC_ELS_RING), 1782 .buffer_count = 0, 1783 .init_count = 40, 1784 .add_count = 40, 1785 }; 1786 1787 /* HBQ for the extra ring if needed */ 1788 static struct lpfc_hbq_init lpfc_extra_hbq = { 1789 .rn = 1, 1790 .entry_count = 200, 1791 .mask_count = 0, 1792 .profile = 0, 1793 .ring_mask = (1 << LPFC_EXTRA_RING), 1794 .buffer_count = 0, 1795 .init_count = 0, 1796 .add_count = 5, 1797 }; 1798 1799 /* Array of HBQs */ 1800 struct lpfc_hbq_init *lpfc_hbq_defs[] = { 1801 &lpfc_els_hbq, 1802 &lpfc_extra_hbq, 1803 }; 1804 1805 /** 1806 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ 1807 * @phba: Pointer to HBA context object. 1808 * @hbqno: HBQ number. 1809 * @count: Number of HBQ buffers to be posted. 1810 * 1811 * This function is called with no lock held to post more hbq buffers to the 1812 * given HBQ. The function returns the number of HBQ buffers successfully 1813 * posted. 
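 * For example, the buffer posted while buffer_count is 5 on HBQ 1 is tagged (5 | (1 << 16)) = 0x10005; lpfc_sli_hbqbuf_find() later recovers the owning HBQ number as (tag >> 16).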
1814 **/ 1815 static int 1816 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count) 1817 { 1818 uint32_t i, posted = 0; 1819 unsigned long flags; 1820 struct hbq_dmabuf *hbq_buffer; 1821 LIST_HEAD(hbq_buf_list); 1822 if (!phba->hbqs[hbqno].hbq_alloc_buffer) 1823 return 0; 1824 1825 if ((phba->hbqs[hbqno].buffer_count + count) > 1826 lpfc_hbq_defs[hbqno]->entry_count) 1827 count = lpfc_hbq_defs[hbqno]->entry_count - 1828 phba->hbqs[hbqno].buffer_count; 1829 if (!count) 1830 return 0; 1831 /* Allocate HBQ entries */ 1832 for (i = 0; i < count; i++) { 1833 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba); 1834 if (!hbq_buffer) 1835 break; 1836 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list); 1837 } 1838 /* Check whether HBQ is still in use */ 1839 spin_lock_irqsave(&phba->hbalock, flags); 1840 if (!phba->hbq_in_use) 1841 goto err; 1842 while (!list_empty(&hbq_buf_list)) { 1843 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf, 1844 dbuf.list); 1845 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count | 1846 (hbqno << 16)); 1847 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) { 1848 phba->hbqs[hbqno].buffer_count++; 1849 posted++; 1850 } else 1851 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 1852 } 1853 spin_unlock_irqrestore(&phba->hbalock, flags); 1854 return posted; 1855 err: 1856 spin_unlock_irqrestore(&phba->hbalock, flags); 1857 while (!list_empty(&hbq_buf_list)) { 1858 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf, 1859 dbuf.list); 1860 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 1861 } 1862 return 0; 1863 } 1864 1865 /** 1866 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware 1867 * @phba: Pointer to HBA context object. 1868 * @qno: HBQ number. 1869 * 1870 * This function posts more buffers to the HBQ. This function 1871 * is called with no lock held. The function returns the number of HBQ entries 1872 * successfully allocated. 1873 **/ 1874 int 1875 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno) 1876 { 1877 if (phba->sli_rev == LPFC_SLI_REV4) 1878 return 0; 1879 else 1880 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 1881 lpfc_hbq_defs[qno]->add_count); 1882 } 1883 1884 /** 1885 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ 1886 * @phba: Pointer to HBA context object. 1887 * @qno: HBQ queue number. 1888 * 1889 * This function is called from SLI initialization code path with 1890 * no lock held to post initial HBQ buffers to firmware. The 1891 * function returns the number of HBQ entries successfully allocated. 1892 **/ 1893 static int 1894 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno) 1895 { 1896 if (phba->sli_rev == LPFC_SLI_REV4) 1897 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 1898 lpfc_hbq_defs[qno]->entry_count); 1899 else 1900 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 1901 lpfc_hbq_defs[qno]->init_count); 1902 } 1903 1904 /** 1905 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list 1906 * @phba: Pointer to HBA context object. 1907 * @hbqno: HBQ number. 1908 * 1909 * This function removes the first hbq buffer on an hbq list and returns a 1910 * pointer to that buffer. If it finds no buffers on the list it returns NULL. 
1911 **/ 1912 static struct hbq_dmabuf * 1913 lpfc_sli_hbqbuf_get(struct list_head *rb_list) 1914 { 1915 struct lpfc_dmabuf *d_buf; 1916 1917 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list); 1918 if (!d_buf) 1919 return NULL; 1920 return container_of(d_buf, struct hbq_dmabuf, dbuf); 1921 } 1922 1923 /** 1924 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag 1925 * @phba: Pointer to HBA context object. 1926 * @tag: Tag of the hbq buffer. 1927 * 1928 * This function is called with no lock held. It takes the hbalock and 1929 * searches for the hbq buffer associated with the given tag in the hbq 1930 * buffer list. If it finds the hbq buffer, it returns the hbq_buffer, 1931 * otherwise it returns NULL. 1932 **/ 1933 static struct hbq_dmabuf * 1934 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag) 1935 { 1936 struct lpfc_dmabuf *d_buf; 1937 struct hbq_dmabuf *hbq_buf; 1938 uint32_t hbqno; 1939 1940 hbqno = tag >> 16; 1941 if (hbqno >= LPFC_MAX_HBQS) 1942 return NULL; 1943 1944 spin_lock_irq(&phba->hbalock); 1945 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) { 1946 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 1947 if (hbq_buf->tag == tag) { 1948 spin_unlock_irq(&phba->hbalock); 1949 return hbq_buf; 1950 } 1951 } 1952 spin_unlock_irq(&phba->hbalock); 1953 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT, 1954 "1803 Bad hbq tag. Data: x%x x%x\n", 1955 tag, phba->hbqs[tag >> 16].buffer_count); 1956 return NULL; 1957 } 1958 1959 /** 1960 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware 1961 * @phba: Pointer to HBA context object. 1962 * @hbq_buffer: Pointer to HBQ buffer. 1963 * 1964 * This function is called with the hbalock held. This function gives back 1965 * the hbq buffer to the firmware. If the HBQ does not have space to 1966 * post the buffer, it will free the buffer. 1967 **/ 1968 void 1969 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer) 1970 { 1971 uint32_t hbqno; 1972 1973 if (hbq_buffer) { 1974 hbqno = hbq_buffer->tag >> 16; 1975 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) 1976 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 1977 } 1978 } 1979 1980 /** 1981 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox 1982 * @mbxCommand: mailbox command code. 1983 * 1984 * This function is called by the mailbox event handler function to verify 1985 * that the completed mailbox command is a legitimate mailbox command. If the 1986 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN 1987 * and the mailbox event handler will take the HBA offline.
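 * For example, a completion for MBX_READ_REV is returned unchanged, while a command code that is not in the switch below maps to MBX_SHUTDOWN so the caller can start error recovery.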
1988 **/ 1989 static int 1990 lpfc_sli_chk_mbx_command(uint8_t mbxCommand) 1991 { 1992 uint8_t ret; 1993 1994 switch (mbxCommand) { 1995 case MBX_LOAD_SM: 1996 case MBX_READ_NV: 1997 case MBX_WRITE_NV: 1998 case MBX_WRITE_VPARMS: 1999 case MBX_RUN_BIU_DIAG: 2000 case MBX_INIT_LINK: 2001 case MBX_DOWN_LINK: 2002 case MBX_CONFIG_LINK: 2003 case MBX_CONFIG_RING: 2004 case MBX_RESET_RING: 2005 case MBX_READ_CONFIG: 2006 case MBX_READ_RCONFIG: 2007 case MBX_READ_SPARM: 2008 case MBX_READ_STATUS: 2009 case MBX_READ_RPI: 2010 case MBX_READ_XRI: 2011 case MBX_READ_REV: 2012 case MBX_READ_LNK_STAT: 2013 case MBX_REG_LOGIN: 2014 case MBX_UNREG_LOGIN: 2015 case MBX_CLEAR_LA: 2016 case MBX_DUMP_MEMORY: 2017 case MBX_DUMP_CONTEXT: 2018 case MBX_RUN_DIAGS: 2019 case MBX_RESTART: 2020 case MBX_UPDATE_CFG: 2021 case MBX_DOWN_LOAD: 2022 case MBX_DEL_LD_ENTRY: 2023 case MBX_RUN_PROGRAM: 2024 case MBX_SET_MASK: 2025 case MBX_SET_VARIABLE: 2026 case MBX_UNREG_D_ID: 2027 case MBX_KILL_BOARD: 2028 case MBX_CONFIG_FARP: 2029 case MBX_BEACON: 2030 case MBX_LOAD_AREA: 2031 case MBX_RUN_BIU_DIAG64: 2032 case MBX_CONFIG_PORT: 2033 case MBX_READ_SPARM64: 2034 case MBX_READ_RPI64: 2035 case MBX_REG_LOGIN64: 2036 case MBX_READ_TOPOLOGY: 2037 case MBX_WRITE_WWN: 2038 case MBX_SET_DEBUG: 2039 case MBX_LOAD_EXP_ROM: 2040 case MBX_ASYNCEVT_ENABLE: 2041 case MBX_REG_VPI: 2042 case MBX_UNREG_VPI: 2043 case MBX_HEARTBEAT: 2044 case MBX_PORT_CAPABILITIES: 2045 case MBX_PORT_IOV_CONTROL: 2046 case MBX_SLI4_CONFIG: 2047 case MBX_SLI4_REQ_FTRS: 2048 case MBX_REG_FCFI: 2049 case MBX_UNREG_FCFI: 2050 case MBX_REG_VFI: 2051 case MBX_UNREG_VFI: 2052 case MBX_INIT_VPI: 2053 case MBX_INIT_VFI: 2054 case MBX_RESUME_RPI: 2055 case MBX_READ_EVENT_LOG_STATUS: 2056 case MBX_READ_EVENT_LOG: 2057 case MBX_SECURITY_MGMT: 2058 case MBX_AUTH_PORT: 2059 ret = mbxCommand; 2060 break; 2061 default: 2062 ret = MBX_SHUTDOWN; 2063 break; 2064 } 2065 return ret; 2066 } 2067 2068 /** 2069 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler 2070 * @phba: Pointer to HBA context object. 2071 * @pmboxq: Pointer to mailbox command. 2072 * 2073 * This is completion handler function for mailbox commands issued from 2074 * lpfc_sli_issue_mbox_wait function. This function is called by the 2075 * mailbox event handler function with no lock held. This function 2076 * will wake up thread waiting on the wait queue pointed by context1 2077 * of the mailbox. 2078 **/ 2079 void 2080 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 2081 { 2082 wait_queue_head_t *pdone_q; 2083 unsigned long drvr_flag; 2084 2085 /* 2086 * If pdone_q is empty, the driver thread gave up waiting and 2087 * continued running. 2088 */ 2089 pmboxq->mbox_flag |= LPFC_MBX_WAKE; 2090 spin_lock_irqsave(&phba->hbalock, drvr_flag); 2091 pdone_q = (wait_queue_head_t *) pmboxq->context1; 2092 if (pdone_q) 2093 wake_up_interruptible(pdone_q); 2094 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 2095 return; 2096 } 2097 2098 2099 /** 2100 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler 2101 * @phba: Pointer to HBA context object. 2102 * @pmb: Pointer to mailbox object. 2103 * 2104 * This function is the default mailbox completion handler. It 2105 * frees the memory resources associated with the completed mailbox 2106 * command. If the completed command is a REG_LOGIN mailbox command, 2107 * this function will issue a UREG_LOGIN to re-claim the RPI. 
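 * A MBX_SLI4_CONFIG mailbox is released through lpfc_sli4_mbox_cmd_free(); any other mailbox is returned to the mbox_mem_pool.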
2108 **/ 2109 void 2110 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 2111 { 2112 struct lpfc_vport *vport = pmb->vport; 2113 struct lpfc_dmabuf *mp; 2114 struct lpfc_nodelist *ndlp; 2115 struct Scsi_Host *shost; 2116 uint16_t rpi, vpi; 2117 int rc; 2118 2119 mp = (struct lpfc_dmabuf *) (pmb->context1); 2120 2121 if (mp) { 2122 lpfc_mbuf_free(phba, mp->virt, mp->phys); 2123 kfree(mp); 2124 } 2125 2126 /* 2127 * If a REG_LOGIN succeeded after the node was destroyed or the node 2128 * is in re-discovery, the driver needs to clean up the RPI. 2129 */ 2130 if (!(phba->pport->load_flag & FC_UNLOADING) && 2131 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 && 2132 !pmb->u.mb.mbxStatus) { 2133 rpi = pmb->u.mb.un.varWords[0]; 2134 vpi = pmb->u.mb.un.varRegLogin.vpi; 2135 lpfc_unreg_login(phba, vpi, rpi, pmb); 2136 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 2137 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 2138 if (rc != MBX_NOT_FINISHED) 2139 return; 2140 } 2141 2142 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) && 2143 !(phba->pport->load_flag & FC_UNLOADING) && 2144 !pmb->u.mb.mbxStatus) { 2145 shost = lpfc_shost_from_vport(vport); 2146 spin_lock_irq(shost->host_lock); 2147 vport->vpi_state |= LPFC_VPI_REGISTERED; 2148 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 2149 spin_unlock_irq(shost->host_lock); 2150 } 2151 2152 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 2153 ndlp = (struct lpfc_nodelist *)pmb->context2; 2154 lpfc_nlp_put(ndlp); 2155 pmb->context2 = NULL; 2156 } 2157 2158 /* Check security permission status on INIT_LINK mailbox command */ 2159 if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) && 2160 (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION)) 2161 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 2162 "2860 SLI authentication is required " 2163 "for INIT_LINK but has not done yet\n"); 2164 2165 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG) 2166 lpfc_sli4_mbox_cmd_free(phba, pmb); 2167 else 2168 mempool_free(pmb, phba->mbox_mem_pool); 2169 } 2170 2171 /** 2172 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware 2173 * @phba: Pointer to HBA context object. 2174 * 2175 * This function is called with no lock held. This function processes all 2176 * the completed mailbox commands and gives them to the upper layers. The 2177 * interrupt service routine processes the mailbox completion interrupt and 2178 * adds completed mailbox commands to the mboxq_cmpl queue and signals the 2179 * worker thread. The worker thread calls lpfc_sli_handle_mb_event, which 2180 * returns the completed mailbox commands on the mboxq_cmpl queue to the 2181 * upper layers. This function returns the mailbox commands to the upper 2182 * layer by calling the completion handler function of each mailbox.
2183 **/ 2184 int 2185 lpfc_sli_handle_mb_event(struct lpfc_hba *phba) 2186 { 2187 MAILBOX_t *pmbox; 2188 LPFC_MBOXQ_t *pmb; 2189 int rc; 2190 LIST_HEAD(cmplq); 2191 2192 phba->sli.slistat.mbox_event++; 2193 2194 /* Get all completed mailboxe buffers into the cmplq */ 2195 spin_lock_irq(&phba->hbalock); 2196 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq); 2197 spin_unlock_irq(&phba->hbalock); 2198 2199 /* Get a Mailbox buffer to setup mailbox commands for callback */ 2200 do { 2201 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list); 2202 if (pmb == NULL) 2203 break; 2204 2205 pmbox = &pmb->u.mb; 2206 2207 if (pmbox->mbxCommand != MBX_HEARTBEAT) { 2208 if (pmb->vport) { 2209 lpfc_debugfs_disc_trc(pmb->vport, 2210 LPFC_DISC_TRC_MBOX_VPORT, 2211 "MBOX cmpl vport: cmd:x%x mb:x%x x%x", 2212 (uint32_t)pmbox->mbxCommand, 2213 pmbox->un.varWords[0], 2214 pmbox->un.varWords[1]); 2215 } 2216 else { 2217 lpfc_debugfs_disc_trc(phba->pport, 2218 LPFC_DISC_TRC_MBOX, 2219 "MBOX cmpl: cmd:x%x mb:x%x x%x", 2220 (uint32_t)pmbox->mbxCommand, 2221 pmbox->un.varWords[0], 2222 pmbox->un.varWords[1]); 2223 } 2224 } 2225 2226 /* 2227 * It is a fatal error if unknown mbox command completion. 2228 */ 2229 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) == 2230 MBX_SHUTDOWN) { 2231 /* Unknown mailbox command compl */ 2232 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 2233 "(%d):0323 Unknown Mailbox command " 2234 "x%x (x%x/x%x) Cmpl\n", 2235 pmb->vport ? pmb->vport->vpi : 0, 2236 pmbox->mbxCommand, 2237 lpfc_sli_config_mbox_subsys_get(phba, 2238 pmb), 2239 lpfc_sli_config_mbox_opcode_get(phba, 2240 pmb)); 2241 phba->link_state = LPFC_HBA_ERROR; 2242 phba->work_hs = HS_FFER3; 2243 lpfc_handle_eratt(phba); 2244 continue; 2245 } 2246 2247 if (pmbox->mbxStatus) { 2248 phba->sli.slistat.mbox_stat_err++; 2249 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) { 2250 /* Mbox cmd cmpl error - RETRYing */ 2251 lpfc_printf_log(phba, KERN_INFO, 2252 LOG_MBOX | LOG_SLI, 2253 "(%d):0305 Mbox cmd cmpl " 2254 "error - RETRYing Data: x%x " 2255 "(x%x/x%x) x%x x%x x%x\n", 2256 pmb->vport ? pmb->vport->vpi : 0, 2257 pmbox->mbxCommand, 2258 lpfc_sli_config_mbox_subsys_get(phba, 2259 pmb), 2260 lpfc_sli_config_mbox_opcode_get(phba, 2261 pmb), 2262 pmbox->mbxStatus, 2263 pmbox->un.varWords[0], 2264 pmb->vport->port_state); 2265 pmbox->mbxStatus = 0; 2266 pmbox->mbxOwner = OWN_HOST; 2267 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 2268 if (rc != MBX_NOT_FINISHED) 2269 continue; 2270 } 2271 } 2272 2273 /* Mailbox cmd <cmd> Cmpl <cmpl> */ 2274 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 2275 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p " 2276 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n", 2277 pmb->vport ? pmb->vport->vpi : 0, 2278 pmbox->mbxCommand, 2279 lpfc_sli_config_mbox_subsys_get(phba, pmb), 2280 lpfc_sli_config_mbox_opcode_get(phba, pmb), 2281 pmb->mbox_cmpl, 2282 *((uint32_t *) pmbox), 2283 pmbox->un.varWords[0], 2284 pmbox->un.varWords[1], 2285 pmbox->un.varWords[2], 2286 pmbox->un.varWords[3], 2287 pmbox->un.varWords[4], 2288 pmbox->un.varWords[5], 2289 pmbox->un.varWords[6], 2290 pmbox->un.varWords[7]); 2291 2292 if (pmb->mbox_cmpl) 2293 pmb->mbox_cmpl(phba,pmb); 2294 } while (1); 2295 return 0; 2296 } 2297 2298 /** 2299 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag 2300 * @phba: Pointer to HBA context object. 2301 * @pring: Pointer to driver SLI ring object. 2302 * @tag: buffer tag. 2303 * 2304 * This function is called with no lock held. 
When QUE_BUFTAG_BIT bit 2305 * is set in the tag the buffer is posted for a particular exchange, 2306 * the function will return the buffer without replacing the buffer. 2307 * If the buffer is for unsolicited ELS or CT traffic, this function 2308 * returns the buffer and also posts another buffer to the firmware. 2309 **/ 2310 static struct lpfc_dmabuf * 2311 lpfc_sli_get_buff(struct lpfc_hba *phba, 2312 struct lpfc_sli_ring *pring, 2313 uint32_t tag) 2314 { 2315 struct hbq_dmabuf *hbq_entry; 2316 2317 if (tag & QUE_BUFTAG_BIT) 2318 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag); 2319 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag); 2320 if (!hbq_entry) 2321 return NULL; 2322 return &hbq_entry->dbuf; 2323 } 2324 2325 /** 2326 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence 2327 * @phba: Pointer to HBA context object. 2328 * @pring: Pointer to driver SLI ring object. 2329 * @saveq: Pointer to the iocbq struct representing the sequence starting frame. 2330 * @fch_r_ctl: the r_ctl for the first frame of the sequence. 2331 * @fch_type: the type for the first frame of the sequence. 2332 * 2333 * This function is called with no lock held. This function uses the r_ctl and 2334 * type of the received sequence to find the correct callback function to call 2335 * to process the sequence. 2336 **/ 2337 static int 2338 lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2339 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl, 2340 uint32_t fch_type) 2341 { 2342 int i; 2343 2344 /* unSolicited Responses */ 2345 if (pring->prt[0].profile) { 2346 if (pring->prt[0].lpfc_sli_rcv_unsol_event) 2347 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring, 2348 saveq); 2349 return 1; 2350 } 2351 /* We must search, based on rctl / type 2352 for the right routine */ 2353 for (i = 0; i < pring->num_mask; i++) { 2354 if ((pring->prt[i].rctl == fch_r_ctl) && 2355 (pring->prt[i].type == fch_type)) { 2356 if (pring->prt[i].lpfc_sli_rcv_unsol_event) 2357 (pring->prt[i].lpfc_sli_rcv_unsol_event) 2358 (phba, pring, saveq); 2359 return 1; 2360 } 2361 } 2362 return 0; 2363 } 2364 2365 /** 2366 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler 2367 * @phba: Pointer to HBA context object. 2368 * @pring: Pointer to driver SLI ring object. 2369 * @saveq: Pointer to the unsolicited iocb. 2370 * 2371 * This function is called with no lock held by the ring event handler 2372 * when there is an unsolicited iocb posted to the response ring by the 2373 * firmware. This function gets the buffer associated with the iocbs 2374 * and calls the event handler for the ring. This function handles both 2375 * qring buffers and hbq buffers. 2376 * When the function returns 1 the caller can free the iocb object otherwise 2377 * upper layer functions will free the iocb objects. 
2378 **/ 2379 static int 2380 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2381 struct lpfc_iocbq *saveq) 2382 { 2383 IOCB_t * irsp; 2384 WORD5 * w5p; 2385 uint32_t Rctl, Type; 2386 uint32_t match; 2387 struct lpfc_iocbq *iocbq; 2388 struct lpfc_dmabuf *dmzbuf; 2389 2390 match = 0; 2391 irsp = &(saveq->iocb); 2392 2393 if (irsp->ulpCommand == CMD_ASYNC_STATUS) { 2394 if (pring->lpfc_sli_rcv_async_status) 2395 pring->lpfc_sli_rcv_async_status(phba, pring, saveq); 2396 else 2397 lpfc_printf_log(phba, 2398 KERN_WARNING, 2399 LOG_SLI, 2400 "0316 Ring %d handler: unexpected " 2401 "ASYNC_STATUS iocb received evt_code " 2402 "0x%x\n", 2403 pring->ringno, 2404 irsp->un.asyncstat.evt_code); 2405 return 1; 2406 } 2407 2408 if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) && 2409 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) { 2410 if (irsp->ulpBdeCount > 0) { 2411 dmzbuf = lpfc_sli_get_buff(phba, pring, 2412 irsp->un.ulpWord[3]); 2413 lpfc_in_buf_free(phba, dmzbuf); 2414 } 2415 2416 if (irsp->ulpBdeCount > 1) { 2417 dmzbuf = lpfc_sli_get_buff(phba, pring, 2418 irsp->unsli3.sli3Words[3]); 2419 lpfc_in_buf_free(phba, dmzbuf); 2420 } 2421 2422 if (irsp->ulpBdeCount > 2) { 2423 dmzbuf = lpfc_sli_get_buff(phba, pring, 2424 irsp->unsli3.sli3Words[7]); 2425 lpfc_in_buf_free(phba, dmzbuf); 2426 } 2427 2428 return 1; 2429 } 2430 2431 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 2432 if (irsp->ulpBdeCount != 0) { 2433 saveq->context2 = lpfc_sli_get_buff(phba, pring, 2434 irsp->un.ulpWord[3]); 2435 if (!saveq->context2) 2436 lpfc_printf_log(phba, 2437 KERN_ERR, 2438 LOG_SLI, 2439 "0341 Ring %d Cannot find buffer for " 2440 "an unsolicited iocb. tag 0x%x\n", 2441 pring->ringno, 2442 irsp->un.ulpWord[3]); 2443 } 2444 if (irsp->ulpBdeCount == 2) { 2445 saveq->context3 = lpfc_sli_get_buff(phba, pring, 2446 irsp->unsli3.sli3Words[7]); 2447 if (!saveq->context3) 2448 lpfc_printf_log(phba, 2449 KERN_ERR, 2450 LOG_SLI, 2451 "0342 Ring %d Cannot find buffer for an" 2452 " unsolicited iocb. tag 0x%x\n", 2453 pring->ringno, 2454 irsp->unsli3.sli3Words[7]); 2455 } 2456 list_for_each_entry(iocbq, &saveq->list, list) { 2457 irsp = &(iocbq->iocb); 2458 if (irsp->ulpBdeCount != 0) { 2459 iocbq->context2 = lpfc_sli_get_buff(phba, pring, 2460 irsp->un.ulpWord[3]); 2461 if (!iocbq->context2) 2462 lpfc_printf_log(phba, 2463 KERN_ERR, 2464 LOG_SLI, 2465 "0343 Ring %d Cannot find " 2466 "buffer for an unsolicited iocb" 2467 ". tag 0x%x\n", pring->ringno, 2468 irsp->un.ulpWord[3]); 2469 } 2470 if (irsp->ulpBdeCount == 2) { 2471 iocbq->context3 = lpfc_sli_get_buff(phba, pring, 2472 irsp->unsli3.sli3Words[7]); 2473 if (!iocbq->context3) 2474 lpfc_printf_log(phba, 2475 KERN_ERR, 2476 LOG_SLI, 2477 "0344 Ring %d Cannot find " 2478 "buffer for an unsolicited " 2479 "iocb. 
tag 0x%x\n", 2480 pring->ringno, 2481 irsp->unsli3.sli3Words[7]); 2482 } 2483 } 2484 } 2485 if (irsp->ulpBdeCount != 0 && 2486 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX || 2487 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) { 2488 int found = 0; 2489 2490 /* search continue save q for same XRI */ 2491 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) { 2492 if (iocbq->iocb.unsli3.rcvsli3.ox_id == 2493 saveq->iocb.unsli3.rcvsli3.ox_id) { 2494 list_add_tail(&saveq->list, &iocbq->list); 2495 found = 1; 2496 break; 2497 } 2498 } 2499 if (!found) 2500 list_add_tail(&saveq->clist, 2501 &pring->iocb_continue_saveq); 2502 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) { 2503 list_del_init(&iocbq->clist); 2504 saveq = iocbq; 2505 irsp = &(saveq->iocb); 2506 } else 2507 return 0; 2508 } 2509 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) || 2510 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) || 2511 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) { 2512 Rctl = FC_RCTL_ELS_REQ; 2513 Type = FC_TYPE_ELS; 2514 } else { 2515 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]); 2516 Rctl = w5p->hcsw.Rctl; 2517 Type = w5p->hcsw.Type; 2518 2519 /* Firmware Workaround */ 2520 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) && 2521 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX || 2522 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { 2523 Rctl = FC_RCTL_ELS_REQ; 2524 Type = FC_TYPE_ELS; 2525 w5p->hcsw.Rctl = Rctl; 2526 w5p->hcsw.Type = Type; 2527 } 2528 } 2529 2530 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type)) 2531 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 2532 "0313 Ring %d handler: unexpected Rctl x%x " 2533 "Type x%x received\n", 2534 pring->ringno, Rctl, Type); 2535 2536 return 1; 2537 } 2538 2539 /** 2540 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb 2541 * @phba: Pointer to HBA context object. 2542 * @pring: Pointer to driver SLI ring object. 2543 * @prspiocb: Pointer to response iocb object. 2544 * 2545 * This function looks up the iocb_lookup table to get the command iocb 2546 * corresponding to the given response iocb using the iotag of the 2547 * response iocb. This function is called with the hbalock held. 2548 * This function returns the command iocb object if it finds the command 2549 * iocb else returns NULL. 2550 **/ 2551 static struct lpfc_iocbq * 2552 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba, 2553 struct lpfc_sli_ring *pring, 2554 struct lpfc_iocbq *prspiocb) 2555 { 2556 struct lpfc_iocbq *cmd_iocb = NULL; 2557 uint16_t iotag; 2558 2559 iotag = prspiocb->iocb.ulpIoTag; 2560 2561 if (iotag != 0 && iotag <= phba->sli.last_iotag) { 2562 cmd_iocb = phba->sli.iocbq_lookup[iotag]; 2563 list_del_init(&cmd_iocb->list); 2564 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) { 2565 pring->txcmplq_cnt--; 2566 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; 2567 } 2568 return cmd_iocb; 2569 } 2570 2571 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2572 "0317 iotag x%x is out off " 2573 "range: max iotag x%x wd0 x%x\n", 2574 iotag, phba->sli.last_iotag, 2575 *(((uint32_t *) &prspiocb->iocb) + 7)); 2576 return NULL; 2577 } 2578 2579 /** 2580 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag 2581 * @phba: Pointer to HBA context object. 2582 * @pring: Pointer to driver SLI ring object. 2583 * @iotag: IOCB tag. 2584 * 2585 * This function looks up the iocb_lookup table to get the command iocb 2586 * corresponding to the given iotag. This function is called with the 2587 * hbalock held. 
2588 * This function returns the command iocb object if it finds the command 2589 * iocb else returns NULL. 2590 **/ 2591 static struct lpfc_iocbq * 2592 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba, 2593 struct lpfc_sli_ring *pring, uint16_t iotag) 2594 { 2595 struct lpfc_iocbq *cmd_iocb; 2596 2597 if (iotag != 0 && iotag <= phba->sli.last_iotag) { 2598 cmd_iocb = phba->sli.iocbq_lookup[iotag]; 2599 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) { 2600 /* remove from txcmpl queue list */ 2601 list_del_init(&cmd_iocb->list); 2602 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; 2603 pring->txcmplq_cnt--; 2604 return cmd_iocb; 2605 } 2606 } 2607 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2608 "0372 iotag x%x is out off range: max iotag (x%x)\n", 2609 iotag, phba->sli.last_iotag); 2610 return NULL; 2611 } 2612 2613 /** 2614 * lpfc_sli_process_sol_iocb - process solicited iocb completion 2615 * @phba: Pointer to HBA context object. 2616 * @pring: Pointer to driver SLI ring object. 2617 * @saveq: Pointer to the response iocb to be processed. 2618 * 2619 * This function is called by the ring event handler for non-fcp 2620 * rings when there is a new response iocb in the response ring. 2621 * The caller is not required to hold any locks. This function 2622 * gets the command iocb associated with the response iocb and 2623 * calls the completion handler for the command iocb. If there 2624 * is no completion handler, the function will free the resources 2625 * associated with command iocb. If the response iocb is for 2626 * an already aborted command iocb, the status of the completion 2627 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED. 2628 * This function always returns 1. 2629 **/ 2630 static int 2631 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2632 struct lpfc_iocbq *saveq) 2633 { 2634 struct lpfc_iocbq *cmdiocbp; 2635 int rc = 1; 2636 unsigned long iflag; 2637 2638 /* Based on the iotag field, get the cmd IOCB from the txcmplq */ 2639 spin_lock_irqsave(&phba->hbalock, iflag); 2640 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq); 2641 spin_unlock_irqrestore(&phba->hbalock, iflag); 2642 2643 if (cmdiocbp) { 2644 if (cmdiocbp->iocb_cmpl) { 2645 /* 2646 * If an ELS command failed send an event to mgmt 2647 * application. 2648 */ 2649 if (saveq->iocb.ulpStatus && 2650 (pring->ringno == LPFC_ELS_RING) && 2651 (cmdiocbp->iocb.ulpCommand == 2652 CMD_ELS_REQUEST64_CR)) 2653 lpfc_send_els_failure_event(phba, 2654 cmdiocbp, saveq); 2655 2656 /* 2657 * Post all ELS completions to the worker thread. 2658 * All other are passed to the completion callback. 2659 */ 2660 if (pring->ringno == LPFC_ELS_RING) { 2661 if ((phba->sli_rev < LPFC_SLI_REV4) && 2662 (cmdiocbp->iocb_flag & 2663 LPFC_DRIVER_ABORTED)) { 2664 spin_lock_irqsave(&phba->hbalock, 2665 iflag); 2666 cmdiocbp->iocb_flag &= 2667 ~LPFC_DRIVER_ABORTED; 2668 spin_unlock_irqrestore(&phba->hbalock, 2669 iflag); 2670 saveq->iocb.ulpStatus = 2671 IOSTAT_LOCAL_REJECT; 2672 saveq->iocb.un.ulpWord[4] = 2673 IOERR_SLI_ABORTED; 2674 2675 /* Firmware could still be in progress 2676 * of DMAing payload, so don't free data 2677 * buffer till after a hbeat. 
2678 */ 2679 spin_lock_irqsave(&phba->hbalock, 2680 iflag); 2681 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE; 2682 spin_unlock_irqrestore(&phba->hbalock, 2683 iflag); 2684 } 2685 if (phba->sli_rev == LPFC_SLI_REV4) { 2686 if (saveq->iocb_flag & 2687 LPFC_EXCHANGE_BUSY) { 2688 /* Set cmdiocb flag for the 2689 * exchange busy so sgl (xri) 2690 * will not be released until 2691 * the abort xri is received 2692 * from hba. 2693 */ 2694 spin_lock_irqsave( 2695 &phba->hbalock, iflag); 2696 cmdiocbp->iocb_flag |= 2697 LPFC_EXCHANGE_BUSY; 2698 spin_unlock_irqrestore( 2699 &phba->hbalock, iflag); 2700 } 2701 if (cmdiocbp->iocb_flag & 2702 LPFC_DRIVER_ABORTED) { 2703 /* 2704 * Clear LPFC_DRIVER_ABORTED 2705 * bit in case it was driver 2706 * initiated abort. 2707 */ 2708 spin_lock_irqsave( 2709 &phba->hbalock, iflag); 2710 cmdiocbp->iocb_flag &= 2711 ~LPFC_DRIVER_ABORTED; 2712 spin_unlock_irqrestore( 2713 &phba->hbalock, iflag); 2714 cmdiocbp->iocb.ulpStatus = 2715 IOSTAT_LOCAL_REJECT; 2716 cmdiocbp->iocb.un.ulpWord[4] = 2717 IOERR_ABORT_REQUESTED; 2718 /* 2719 * For SLI4, irsiocb contains 2720 * NO_XRI in sli_xritag, it 2721 * shall not affect releasing 2722 * sgl (xri) process. 2723 */ 2724 saveq->iocb.ulpStatus = 2725 IOSTAT_LOCAL_REJECT; 2726 saveq->iocb.un.ulpWord[4] = 2727 IOERR_SLI_ABORTED; 2728 spin_lock_irqsave( 2729 &phba->hbalock, iflag); 2730 saveq->iocb_flag |= 2731 LPFC_DELAY_MEM_FREE; 2732 spin_unlock_irqrestore( 2733 &phba->hbalock, iflag); 2734 } 2735 } 2736 } 2737 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq); 2738 } else 2739 lpfc_sli_release_iocbq(phba, cmdiocbp); 2740 } else { 2741 /* 2742 * Unknown initiating command based on the response iotag. 2743 * This could be the case on the ELS ring because of 2744 * lpfc_els_abort(). 2745 */ 2746 if (pring->ringno != LPFC_ELS_RING) { 2747 /* 2748 * Ring <ringno> handler: unexpected completion IoTag 2749 * <IoTag> 2750 */ 2751 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 2752 "0322 Ring %d handler: " 2753 "unexpected completion IoTag x%x " 2754 "Data: x%x x%x x%x x%x\n", 2755 pring->ringno, 2756 saveq->iocb.ulpIoTag, 2757 saveq->iocb.ulpStatus, 2758 saveq->iocb.un.ulpWord[4], 2759 saveq->iocb.ulpCommand, 2760 saveq->iocb.ulpContext); 2761 } 2762 } 2763 2764 return rc; 2765 } 2766 2767 /** 2768 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler 2769 * @phba: Pointer to HBA context object. 2770 * @pring: Pointer to driver SLI ring object. 2771 * 2772 * This function is called from the iocb ring event handlers when 2773 * put pointer is ahead of the get pointer for a ring. This function signal 2774 * an error attention condition to the worker thread and the worker 2775 * thread will transition the HBA to offline state. 
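 * The error is reported by setting HA_ERATT in phba->work_ha and HS_FFER3 in phba->work_hs before waking the worker thread.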
2776 **/ 2777 static void 2778 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 2779 { 2780 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; 2781 /* 2782 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than 2783 * rsp ring <portRspMax> 2784 */ 2785 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2786 "0312 Ring %d handler: portRspPut %d " 2787 "is bigger than rsp ring %d\n", 2788 pring->ringno, le32_to_cpu(pgp->rspPutInx), 2789 pring->numRiocb); 2790 2791 phba->link_state = LPFC_HBA_ERROR; 2792 2793 /* 2794 * All error attention handlers are posted to 2795 * worker thread 2796 */ 2797 phba->work_ha |= HA_ERATT; 2798 phba->work_hs = HS_FFER3; 2799 2800 lpfc_worker_wake_up(phba); 2801 2802 return; 2803 } 2804 2805 /** 2806 * lpfc_poll_eratt - Error attention polling timer timeout handler 2807 * @ptr: Pointer to address of HBA context object. 2808 * 2809 * This function is invoked by the Error Attention polling timer when the 2810 * timer times out. It will check the SLI Error Attention register for 2811 * possible attention events. If so, it will post an Error Attention event 2812 * and wake up worker thread to process it. Otherwise, it will set up the 2813 * Error Attention polling timer for the next poll. 2814 **/ 2815 void lpfc_poll_eratt(unsigned long ptr) 2816 { 2817 struct lpfc_hba *phba; 2818 uint32_t eratt = 0; 2819 2820 phba = (struct lpfc_hba *)ptr; 2821 2822 /* Check chip HA register for error event */ 2823 eratt = lpfc_sli_check_eratt(phba); 2824 2825 if (eratt) 2826 /* Tell the worker thread there is work to do */ 2827 lpfc_worker_wake_up(phba); 2828 else 2829 /* Restart the timer for next eratt poll */ 2830 mod_timer(&phba->eratt_poll, jiffies + 2831 HZ * LPFC_ERATT_POLL_INTERVAL); 2832 return; 2833 } 2834 2835 2836 /** 2837 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring 2838 * @phba: Pointer to HBA context object. 2839 * @pring: Pointer to driver SLI ring object. 2840 * @mask: Host attention register mask for this ring. 2841 * 2842 * This function is called from the interrupt context when there is a ring 2843 * event for the fcp ring. The caller does not hold any lock. 2844 * The function processes each response iocb in the response ring until it 2845 * finds an iocb with LE bit set and chains all the iocbs up to the iocb with 2846 * LE bit set. The function will call the completion handler of the command iocb 2847 * if the response iocb indicates a completion for a command iocb or it is 2848 * an abort completion. The function will call lpfc_sli_process_unsol_iocb 2849 * function if this is an unsolicited iocb. 2850 * This routine presumes LPFC_FCP_RING handling and doesn't bother 2851 * to check it explicitly. 2852 */ 2853 int 2854 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, 2855 struct lpfc_sli_ring *pring, uint32_t mask) 2856 { 2857 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; 2858 IOCB_t *irsp = NULL; 2859 IOCB_t *entry = NULL; 2860 struct lpfc_iocbq *cmdiocbq = NULL; 2861 struct lpfc_iocbq rspiocbq; 2862 uint32_t status; 2863 uint32_t portRspPut, portRspMax; 2864 int rc = 1; 2865 lpfc_iocb_type type; 2866 unsigned long iflag; 2867 uint32_t rsp_cmpl = 0; 2868 2869 spin_lock_irqsave(&phba->hbalock, iflag); 2870 pring->stats.iocb_event++; 2871 2872 /* 2873 * The next available response entry should never exceed the maximum 2874 * entries. If it does, treat it as an adapter hardware error. 
2875 */ 2876 portRspMax = pring->numRiocb; 2877 portRspPut = le32_to_cpu(pgp->rspPutInx); 2878 if (unlikely(portRspPut >= portRspMax)) { 2879 lpfc_sli_rsp_pointers_error(phba, pring); 2880 spin_unlock_irqrestore(&phba->hbalock, iflag); 2881 return 1; 2882 } 2883 if (phba->fcp_ring_in_use) { 2884 spin_unlock_irqrestore(&phba->hbalock, iflag); 2885 return 1; 2886 } else 2887 phba->fcp_ring_in_use = 1; 2888 2889 rmb(); 2890 while (pring->rspidx != portRspPut) { 2891 /* 2892 * Fetch an entry off the ring and copy it into a local data 2893 * structure. The copy involves a byte-swap since the 2894 * network byte order and pci byte orders are different. 2895 */ 2896 entry = lpfc_resp_iocb(phba, pring); 2897 phba->last_completion_time = jiffies; 2898 2899 if (++pring->rspidx >= portRspMax) 2900 pring->rspidx = 0; 2901 2902 lpfc_sli_pcimem_bcopy((uint32_t *) entry, 2903 (uint32_t *) &rspiocbq.iocb, 2904 phba->iocb_rsp_size); 2905 INIT_LIST_HEAD(&(rspiocbq.list)); 2906 irsp = &rspiocbq.iocb; 2907 2908 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK); 2909 pring->stats.iocb_rsp++; 2910 rsp_cmpl++; 2911 2912 if (unlikely(irsp->ulpStatus)) { 2913 /* 2914 * If resource errors reported from HBA, reduce 2915 * queuedepths of the SCSI device. 2916 */ 2917 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 2918 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) { 2919 spin_unlock_irqrestore(&phba->hbalock, iflag); 2920 phba->lpfc_rampdown_queue_depth(phba); 2921 spin_lock_irqsave(&phba->hbalock, iflag); 2922 } 2923 2924 /* Rsp ring <ringno> error: IOCB */ 2925 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 2926 "0336 Rsp Ring %d error: IOCB Data: " 2927 "x%x x%x x%x x%x x%x x%x x%x x%x\n", 2928 pring->ringno, 2929 irsp->un.ulpWord[0], 2930 irsp->un.ulpWord[1], 2931 irsp->un.ulpWord[2], 2932 irsp->un.ulpWord[3], 2933 irsp->un.ulpWord[4], 2934 irsp->un.ulpWord[5], 2935 *(uint32_t *)&irsp->un1, 2936 *((uint32_t *)&irsp->un1 + 1)); 2937 } 2938 2939 switch (type) { 2940 case LPFC_ABORT_IOCB: 2941 case LPFC_SOL_IOCB: 2942 /* 2943 * Idle exchange closed via ABTS from port. No iocb 2944 * resources need to be recovered. 2945 */ 2946 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) { 2947 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 2948 "0333 IOCB cmd 0x%x" 2949 " processed. 
Skipping" 2950 " completion\n", 2951 irsp->ulpCommand); 2952 break; 2953 } 2954 2955 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, 2956 &rspiocbq); 2957 if (unlikely(!cmdiocbq)) 2958 break; 2959 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) 2960 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED; 2961 if (cmdiocbq->iocb_cmpl) { 2962 spin_unlock_irqrestore(&phba->hbalock, iflag); 2963 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, 2964 &rspiocbq); 2965 spin_lock_irqsave(&phba->hbalock, iflag); 2966 } 2967 break; 2968 case LPFC_UNSOL_IOCB: 2969 spin_unlock_irqrestore(&phba->hbalock, iflag); 2970 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq); 2971 spin_lock_irqsave(&phba->hbalock, iflag); 2972 break; 2973 default: 2974 if (irsp->ulpCommand == CMD_ADAPTER_MSG) { 2975 char adaptermsg[LPFC_MAX_ADPTMSG]; 2976 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); 2977 memcpy(&adaptermsg[0], (uint8_t *) irsp, 2978 MAX_MSG_DATA); 2979 dev_warn(&((phba->pcidev)->dev), 2980 "lpfc%d: %s\n", 2981 phba->brd_no, adaptermsg); 2982 } else { 2983 /* Unknown IOCB command */ 2984 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2985 "0334 Unknown IOCB command " 2986 "Data: x%x, x%x x%x x%x x%x\n", 2987 type, irsp->ulpCommand, 2988 irsp->ulpStatus, 2989 irsp->ulpIoTag, 2990 irsp->ulpContext); 2991 } 2992 break; 2993 } 2994 2995 /* 2996 * The response IOCB has been processed. Update the ring 2997 * pointer in SLIM. If the port response put pointer has not 2998 * been updated, sync the pgp->rspPutInx and fetch the new port 2999 * response put pointer. 3000 */ 3001 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx); 3002 3003 if (pring->rspidx == portRspPut) 3004 portRspPut = le32_to_cpu(pgp->rspPutInx); 3005 } 3006 3007 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) { 3008 pring->stats.iocb_rsp_full++; 3009 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4)); 3010 writel(status, phba->CAregaddr); 3011 readl(phba->CAregaddr); 3012 } 3013 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) { 3014 pring->flag &= ~LPFC_CALL_RING_AVAILABLE; 3015 pring->stats.iocb_cmd_empty++; 3016 3017 /* Force update of the local copy of cmdGetInx */ 3018 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx); 3019 lpfc_sli_resume_iocb(phba, pring); 3020 3021 if ((pring->lpfc_sli_cmd_available)) 3022 (pring->lpfc_sli_cmd_available) (phba, pring); 3023 3024 } 3025 3026 phba->fcp_ring_in_use = 0; 3027 spin_unlock_irqrestore(&phba->hbalock, iflag); 3028 return rc; 3029 } 3030 3031 /** 3032 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb 3033 * @phba: Pointer to HBA context object. 3034 * @pring: Pointer to driver SLI ring object. 3035 * @rspiocbp: Pointer to driver response IOCB object. 3036 * 3037 * This function is called from the worker thread when there is a slow-path 3038 * response IOCB to process. This function chains all the response iocbs until 3039 * seeing the iocb with the LE bit set. The function will call 3040 * lpfc_sli_process_sol_iocb function if the response iocb indicates a 3041 * completion of a command iocb. The function will call the 3042 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb. 3043 * The function frees the resources or calls the completion handler if this 3044 * iocb is an abort completion. The function returns NULL when the response 3045 * iocb has the LE bit set and all the chained iocbs are processed, otherwise 3046 * this function shall chain the iocb on to the iocb_continueq and return the 3047 * response iocb passed in. 
3048 **/ 3049 static struct lpfc_iocbq * 3050 lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 3051 struct lpfc_iocbq *rspiocbp) 3052 { 3053 struct lpfc_iocbq *saveq; 3054 struct lpfc_iocbq *cmdiocbp; 3055 struct lpfc_iocbq *next_iocb; 3056 IOCB_t *irsp = NULL; 3057 uint32_t free_saveq; 3058 uint8_t iocb_cmd_type; 3059 lpfc_iocb_type type; 3060 unsigned long iflag; 3061 int rc; 3062 3063 spin_lock_irqsave(&phba->hbalock, iflag); 3064 /* First add the response iocb to the countinueq list */ 3065 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq)); 3066 pring->iocb_continueq_cnt++; 3067 3068 /* Now, determine whether the list is completed for processing */ 3069 irsp = &rspiocbp->iocb; 3070 if (irsp->ulpLe) { 3071 /* 3072 * By default, the driver expects to free all resources 3073 * associated with this iocb completion. 3074 */ 3075 free_saveq = 1; 3076 saveq = list_get_first(&pring->iocb_continueq, 3077 struct lpfc_iocbq, list); 3078 irsp = &(saveq->iocb); 3079 list_del_init(&pring->iocb_continueq); 3080 pring->iocb_continueq_cnt = 0; 3081 3082 pring->stats.iocb_rsp++; 3083 3084 /* 3085 * If resource errors reported from HBA, reduce 3086 * queuedepths of the SCSI device. 3087 */ 3088 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 3089 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) { 3090 spin_unlock_irqrestore(&phba->hbalock, iflag); 3091 phba->lpfc_rampdown_queue_depth(phba); 3092 spin_lock_irqsave(&phba->hbalock, iflag); 3093 } 3094 3095 if (irsp->ulpStatus) { 3096 /* Rsp ring <ringno> error: IOCB */ 3097 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 3098 "0328 Rsp Ring %d error: " 3099 "IOCB Data: " 3100 "x%x x%x x%x x%x " 3101 "x%x x%x x%x x%x " 3102 "x%x x%x x%x x%x " 3103 "x%x x%x x%x x%x\n", 3104 pring->ringno, 3105 irsp->un.ulpWord[0], 3106 irsp->un.ulpWord[1], 3107 irsp->un.ulpWord[2], 3108 irsp->un.ulpWord[3], 3109 irsp->un.ulpWord[4], 3110 irsp->un.ulpWord[5], 3111 *(((uint32_t *) irsp) + 6), 3112 *(((uint32_t *) irsp) + 7), 3113 *(((uint32_t *) irsp) + 8), 3114 *(((uint32_t *) irsp) + 9), 3115 *(((uint32_t *) irsp) + 10), 3116 *(((uint32_t *) irsp) + 11), 3117 *(((uint32_t *) irsp) + 12), 3118 *(((uint32_t *) irsp) + 13), 3119 *(((uint32_t *) irsp) + 14), 3120 *(((uint32_t *) irsp) + 15)); 3121 } 3122 3123 /* 3124 * Fetch the IOCB command type and call the correct completion 3125 * routine. Solicited and Unsolicited IOCBs on the ELS ring 3126 * get freed back to the lpfc_iocb_list by the discovery 3127 * kernel thread. 
3128 */ 3129 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK; 3130 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type); 3131 switch (type) { 3132 case LPFC_SOL_IOCB: 3133 spin_unlock_irqrestore(&phba->hbalock, iflag); 3134 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq); 3135 spin_lock_irqsave(&phba->hbalock, iflag); 3136 break; 3137 3138 case LPFC_UNSOL_IOCB: 3139 spin_unlock_irqrestore(&phba->hbalock, iflag); 3140 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq); 3141 spin_lock_irqsave(&phba->hbalock, iflag); 3142 if (!rc) 3143 free_saveq = 0; 3144 break; 3145 3146 case LPFC_ABORT_IOCB: 3147 cmdiocbp = NULL; 3148 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX) 3149 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, 3150 saveq); 3151 if (cmdiocbp) { 3152 /* Call the specified completion routine */ 3153 if (cmdiocbp->iocb_cmpl) { 3154 spin_unlock_irqrestore(&phba->hbalock, 3155 iflag); 3156 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp, 3157 saveq); 3158 spin_lock_irqsave(&phba->hbalock, 3159 iflag); 3160 } else 3161 __lpfc_sli_release_iocbq(phba, 3162 cmdiocbp); 3163 } 3164 break; 3165 3166 case LPFC_UNKNOWN_IOCB: 3167 if (irsp->ulpCommand == CMD_ADAPTER_MSG) { 3168 char adaptermsg[LPFC_MAX_ADPTMSG]; 3169 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); 3170 memcpy(&adaptermsg[0], (uint8_t *)irsp, 3171 MAX_MSG_DATA); 3172 dev_warn(&((phba->pcidev)->dev), 3173 "lpfc%d: %s\n", 3174 phba->brd_no, adaptermsg); 3175 } else { 3176 /* Unknown IOCB command */ 3177 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3178 "0335 Unknown IOCB " 3179 "command Data: x%x " 3180 "x%x x%x x%x\n", 3181 irsp->ulpCommand, 3182 irsp->ulpStatus, 3183 irsp->ulpIoTag, 3184 irsp->ulpContext); 3185 } 3186 break; 3187 } 3188 3189 if (free_saveq) { 3190 list_for_each_entry_safe(rspiocbp, next_iocb, 3191 &saveq->list, list) { 3192 list_del(&rspiocbp->list); 3193 __lpfc_sli_release_iocbq(phba, rspiocbp); 3194 } 3195 __lpfc_sli_release_iocbq(phba, saveq); 3196 } 3197 rspiocbp = NULL; 3198 } 3199 spin_unlock_irqrestore(&phba->hbalock, iflag); 3200 return rspiocbp; 3201 } 3202 3203 /** 3204 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs 3205 * @phba: Pointer to HBA context object. 3206 * @pring: Pointer to driver SLI ring object. 3207 * @mask: Host attention register mask for this ring. 3208 * 3209 * This routine wraps the actual slow_ring event process routine from the 3210 * API jump table function pointer from the lpfc_hba struct. 3211 **/ 3212 void 3213 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba, 3214 struct lpfc_sli_ring *pring, uint32_t mask) 3215 { 3216 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask); 3217 } 3218 3219 /** 3220 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings 3221 * @phba: Pointer to HBA context object. 3222 * @pring: Pointer to driver SLI ring object. 3223 * @mask: Host attention register mask for this ring. 3224 * 3225 * This function is called from the worker thread when there is a ring event 3226 * for non-fcp rings. The caller does not hold any lock. The function will 3227 * remove each response iocb in the response ring and calls the handle 3228 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it. 
3229 **/ 3230 static void 3231 lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba, 3232 struct lpfc_sli_ring *pring, uint32_t mask) 3233 { 3234 struct lpfc_pgp *pgp; 3235 IOCB_t *entry; 3236 IOCB_t *irsp = NULL; 3237 struct lpfc_iocbq *rspiocbp = NULL; 3238 uint32_t portRspPut, portRspMax; 3239 unsigned long iflag; 3240 uint32_t status; 3241 3242 pgp = &phba->port_gp[pring->ringno]; 3243 spin_lock_irqsave(&phba->hbalock, iflag); 3244 pring->stats.iocb_event++; 3245 3246 /* 3247 * The next available response entry should never exceed the maximum 3248 * entries. If it does, treat it as an adapter hardware error. 3249 */ 3250 portRspMax = pring->numRiocb; 3251 portRspPut = le32_to_cpu(pgp->rspPutInx); 3252 if (portRspPut >= portRspMax) { 3253 /* 3254 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than 3255 * rsp ring <portRspMax> 3256 */ 3257 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3258 "0303 Ring %d handler: portRspPut %d " 3259 "is bigger than rsp ring %d\n", 3260 pring->ringno, portRspPut, portRspMax); 3261 3262 phba->link_state = LPFC_HBA_ERROR; 3263 spin_unlock_irqrestore(&phba->hbalock, iflag); 3264 3265 phba->work_hs = HS_FFER3; 3266 lpfc_handle_eratt(phba); 3267 3268 return; 3269 } 3270 3271 rmb(); 3272 while (pring->rspidx != portRspPut) { 3273 /* 3274 * Build a completion list and call the appropriate handler. 3275 * The process is to get the next available response iocb, get 3276 * a free iocb from the list, copy the response data into the 3277 * free iocb, insert to the continuation list, and update the 3278 * next response index to slim. This process makes response 3279 * iocb's in the ring available to DMA as fast as possible but 3280 * pays a penalty for a copy operation. Since the iocb is 3281 * only 32 bytes, this penalty is considered small relative to 3282 * the PCI reads for register values and a slim write. When 3283 * the ulpLe field is set, the entire Command has been 3284 * received. 3285 */ 3286 entry = lpfc_resp_iocb(phba, pring); 3287 3288 phba->last_completion_time = jiffies; 3289 rspiocbp = __lpfc_sli_get_iocbq(phba); 3290 if (rspiocbp == NULL) { 3291 printk(KERN_ERR "%s: out of buffers! Failing " 3292 "completion.\n", __func__); 3293 break; 3294 } 3295 3296 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb, 3297 phba->iocb_rsp_size); 3298 irsp = &rspiocbp->iocb; 3299 3300 if (++pring->rspidx >= portRspMax) 3301 pring->rspidx = 0; 3302 3303 if (pring->ringno == LPFC_ELS_RING) { 3304 lpfc_debugfs_slow_ring_trc(phba, 3305 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x", 3306 *(((uint32_t *) irsp) + 4), 3307 *(((uint32_t *) irsp) + 6), 3308 *(((uint32_t *) irsp) + 7)); 3309 } 3310 3311 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx); 3312 3313 spin_unlock_irqrestore(&phba->hbalock, iflag); 3314 /* Handle the response IOCB */ 3315 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp); 3316 spin_lock_irqsave(&phba->hbalock, iflag); 3317 3318 /* 3319 * If the port response put pointer has not been updated, sync 3320 * the pgp->rspPutInx in the MAILBOX_tand fetch the new port 3321 * response put pointer. 
3322 */ 3323 if (pring->rspidx == portRspPut) { 3324 portRspPut = le32_to_cpu(pgp->rspPutInx); 3325 } 3326 } /* while (pring->rspidx != portRspPut) */ 3327 3328 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) { 3329 /* At least one response entry has been freed */ 3330 pring->stats.iocb_rsp_full++; 3331 /* SET RxRE_RSP in Chip Att register */ 3332 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4)); 3333 writel(status, phba->CAregaddr); 3334 readl(phba->CAregaddr); /* flush */ 3335 } 3336 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) { 3337 pring->flag &= ~LPFC_CALL_RING_AVAILABLE; 3338 pring->stats.iocb_cmd_empty++; 3339 3340 /* Force update of the local copy of cmdGetInx */ 3341 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx); 3342 lpfc_sli_resume_iocb(phba, pring); 3343 3344 if ((pring->lpfc_sli_cmd_available)) 3345 (pring->lpfc_sli_cmd_available) (phba, pring); 3346 3347 } 3348 3349 spin_unlock_irqrestore(&phba->hbalock, iflag); 3350 return; 3351 } 3352 3353 /** 3354 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events 3355 * @phba: Pointer to HBA context object. 3356 * @pring: Pointer to driver SLI ring object. 3357 * @mask: Host attention register mask for this ring. 3358 * 3359 * This function is called from the worker thread when there is a pending 3360 * ELS response iocb on the driver internal slow-path response iocb worker 3361 * queue. The caller does not hold any lock. The function removes each 3362 * response iocb from the response worker queue and calls the handle 3363 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it. 3364 **/ 3365 static void 3366 lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba, 3367 struct lpfc_sli_ring *pring, uint32_t mask) 3368 { 3369 struct lpfc_iocbq *irspiocbq; 3370 struct hbq_dmabuf *dmabuf; 3371 struct lpfc_cq_event *cq_event; 3372 unsigned long iflag; 3373 3374 spin_lock_irqsave(&phba->hbalock, iflag); 3375 phba->hba_flag &= ~HBA_SP_QUEUE_EVT; 3376 spin_unlock_irqrestore(&phba->hbalock, iflag); 3377 while (!list_empty(&phba->sli4_hba.sp_queue_event)) { 3378 /* Get the response iocb from the head of work queue */ 3379 spin_lock_irqsave(&phba->hbalock, iflag); 3380 list_remove_head(&phba->sli4_hba.sp_queue_event, 3381 cq_event, struct lpfc_cq_event, list); 3382 spin_unlock_irqrestore(&phba->hbalock, iflag); 3383 3384 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) { 3385 case CQE_CODE_COMPL_WQE: 3386 irspiocbq = container_of(cq_event, struct lpfc_iocbq, 3387 cq_event); 3388 /* Translate ELS WCQE to response IOCBQ */ 3389 irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba, 3390 irspiocbq); 3391 if (irspiocbq) 3392 lpfc_sli_sp_handle_rspiocb(phba, pring, 3393 irspiocbq); 3394 break; 3395 case CQE_CODE_RECEIVE: 3396 case CQE_CODE_RECEIVE_V1: 3397 dmabuf = container_of(cq_event, struct hbq_dmabuf, 3398 cq_event); 3399 lpfc_sli4_handle_received_buffer(phba, dmabuf); 3400 break; 3401 default: 3402 break; 3403 } 3404 } 3405 } 3406 3407 /** 3408 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring 3409 * @phba: Pointer to HBA context object. 3410 * @pring: Pointer to driver SLI ring object. 3411 * 3412 * This function aborts all iocbs in the given ring and frees all the iocb 3413 * objects in txq. This function issues an abort iocb for all the iocb commands 3414 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before 3415 * the return of this function. The caller is not required to hold any locks.
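 * Entries still on the txq are simply cancelled with IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED, while entries already on the txcmplq are aborted with lpfc_sli_issue_abort_iotag().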
3416 **/ 3417 void 3418 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 3419 { 3420 LIST_HEAD(completions); 3421 struct lpfc_iocbq *iocb, *next_iocb; 3422 3423 if (pring->ringno == LPFC_ELS_RING) { 3424 lpfc_fabric_abort_hba(phba); 3425 } 3426 3427 /* Error everything on txq and txcmplq 3428 * First do the txq. 3429 */ 3430 spin_lock_irq(&phba->hbalock); 3431 list_splice_init(&pring->txq, &completions); 3432 pring->txq_cnt = 0; 3433 3434 /* Next issue ABTS for everything on the txcmplq */ 3435 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) 3436 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 3437 3438 spin_unlock_irq(&phba->hbalock); 3439 3440 /* Cancel all the IOCBs from the completions list */ 3441 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 3442 IOERR_SLI_ABORTED); 3443 } 3444 3445 /** 3446 * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring 3447 * @phba: Pointer to HBA context object. 3448 * 3449 * This function flushes all iocbs in the fcp ring and frees all the iocb 3450 * objects in txq and txcmplq. This function does not issue abort iocbs 3451 * for the iocb commands in txcmplq; they are just returned with 3452 * IOERR_SLI_DOWN. This function is invoked with EEH when the device's PCI 3453 * slot has been permanently disabled. 3454 **/ 3455 void 3456 lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba) 3457 { 3458 LIST_HEAD(txq); 3459 LIST_HEAD(txcmplq); 3460 struct lpfc_sli *psli = &phba->sli; 3461 struct lpfc_sli_ring *pring; 3462 3463 /* Currently, only one fcp ring */ 3464 pring = &psli->ring[psli->fcp_ring]; 3465 3466 spin_lock_irq(&phba->hbalock); 3467 /* Retrieve everything on txq */ 3468 list_splice_init(&pring->txq, &txq); 3469 pring->txq_cnt = 0; 3470 3471 /* Retrieve everything on the txcmplq */ 3472 list_splice_init(&pring->txcmplq, &txcmplq); 3473 pring->txcmplq_cnt = 0; 3474 3475 /* Indicate the I/O queues are flushed */ 3476 phba->hba_flag |= HBA_FCP_IOQ_FLUSH; 3477 spin_unlock_irq(&phba->hbalock); 3478 3479 /* Flush the txq */ 3480 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT, 3481 IOERR_SLI_DOWN); 3482 3483 /* Flush the txcmplq */ 3484 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT, 3485 IOERR_SLI_DOWN); 3486 } 3487 3488 /** 3489 * lpfc_sli_brdready_s3 - Check for sli3 host ready status 3490 * @phba: Pointer to HBA context object. 3491 * @mask: Bit mask to be checked. 3492 * 3493 * This function reads the host status register and compares 3494 * with the provided bit mask to check if the HBA completed 3495 * the restart. This function will wait in a loop for the 3496 * HBA to complete restart. If the HBA does not restart within 3497 * 15 iterations, the function will reset the HBA again. The 3498 * function returns 1 when the HBA fails to restart, otherwise it 3499 * returns zero. 3500 **/ 3501 static int 3502 lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask) 3503 { 3504 uint32_t status; 3505 int i = 0; 3506 int retval = 0; 3507 3508 /* Read the HBA Host Status Register */ 3509 if (lpfc_readl(phba->HSregaddr, &status)) 3510 return 1; 3511 3512 /* 3513 * Check status register every 100ms for 5 retries, then every 3514 * 500ms for 5, then every 2.5 sec for 5, then reset board and 3515 * every 2.5 sec for 4. 3516 * Break out of the loop if errors occurred during init.
3517 */
3518 while (((status & mask) != mask) &&
3519 !(status & HS_FFERM) &&
3520 i++ < 20) {
3521 
3522 if (i <= 5)
3523 msleep(10);
3524 else if (i <= 10)
3525 msleep(500);
3526 else
3527 msleep(2500);
3528 
3529 if (i == 15) {
3530 /* Do post */
3531 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
3532 lpfc_sli_brdrestart(phba);
3533 }
3534 /* Read the HBA Host Status Register */
3535 if (lpfc_readl(phba->HSregaddr, &status)) {
3536 retval = 1;
3537 break;
3538 }
3539 }
3540 
3541 /* Check to see if any errors occurred during init */
3542 if ((status & HS_FFERM) || (i >= 20)) {
3543 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3544 "2751 Adapter failed to restart, "
3545 "status reg x%x, FW Data: A8 x%x AC x%x\n",
3546 status,
3547 readl(phba->MBslimaddr + 0xa8),
3548 readl(phba->MBslimaddr + 0xac));
3549 phba->link_state = LPFC_HBA_ERROR;
3550 retval = 1;
3551 }
3552 
3553 return retval;
3554 }
3555 
3556 /**
3557 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
3558 * @phba: Pointer to HBA context object.
3559 * @mask: Bit mask to be checked.
3560 *
3561 * This function checks the host status register to determine if the HBA
3562 * is ready. This function will wait in a loop for the HBA to be ready.
3563 * If the HBA is not ready, the function will reset the HBA PCI
3564 * function again. The function returns 1 when the HBA fails to be ready,
3565 * otherwise it returns zero.
3566 **/
3567 static int
3568 lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
3569 {
3570 uint32_t status;
3571 int retval = 0;
3572 
3573 /* Read the HBA Host Status Register */
3574 status = lpfc_sli4_post_status_check(phba);
3575 
3576 if (status) {
3577 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
3578 lpfc_sli_brdrestart(phba);
3579 status = lpfc_sli4_post_status_check(phba);
3580 }
3581 
3582 /* Check to see if any errors occurred during init */
3583 if (status) {
3584 phba->link_state = LPFC_HBA_ERROR;
3585 retval = 1;
3586 } else
3587 phba->sli4_hba.intr_enable = 0;
3588 
3589 return retval;
3590 }
3591 
3592 /**
3593 * lpfc_sli_brdready - Wrapper func for checking the hba readiness
3594 * @phba: Pointer to HBA context object.
3595 * @mask: Bit mask to be checked.
3596 *
3597 * This routine wraps the actual SLI3 or SLI4 hba readiness check routine,
3598 * using the API jump table function pointer from the lpfc_hba struct.
3599 **/
3600 int
3601 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
3602 {
3603 return phba->lpfc_sli_brdready(phba, mask);
3604 }
3605 
3606 #define BARRIER_TEST_PATTERN (0xdeadbeef)
3607 
3608 /**
3609 * lpfc_reset_barrier - Make HBA ready for HBA reset
3610 * @phba: Pointer to HBA context object.
3611 *
3612 * This function is called before resetting an HBA. This function is called
3613 * with the hbalock held and requests the HBA to quiesce DMAs before a reset.
3614 **/
3615 void lpfc_reset_barrier(struct lpfc_hba *phba)
3616 {
3617 uint32_t __iomem *resp_buf;
3618 uint32_t __iomem *mbox_buf;
3619 volatile uint32_t mbox;
3620 uint32_t hc_copy, ha_copy, resp_data;
3621 int i;
3622 uint8_t hdrtype;
3623 
3624 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
3625 if (hdrtype != 0x80 ||
3626 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
3627 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
3628 return;
3629 
3630 /*
3631 * Tell the other part of the chip to temporarily suspend all
3632 * its DMA activity.
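 *
 * This is done by writing a KILL_BOARD mailbox word, marked as owned by
 * the chip, into SLIM after seeding the next SLIM word with
 * BARRIER_TEST_PATTERN; the code below then polls that word for the
 * pattern's complement, which the chip is expected to write back once it
 * has quiesced its DMA activity.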
3633 */ 3634 resp_buf = phba->MBslimaddr; 3635 3636 /* Disable the error attention */ 3637 if (lpfc_readl(phba->HCregaddr, &hc_copy)) 3638 return; 3639 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr); 3640 readl(phba->HCregaddr); /* flush */ 3641 phba->link_flag |= LS_IGNORE_ERATT; 3642 3643 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 3644 return; 3645 if (ha_copy & HA_ERATT) { 3646 /* Clear Chip error bit */ 3647 writel(HA_ERATT, phba->HAregaddr); 3648 phba->pport->stopped = 1; 3649 } 3650 3651 mbox = 0; 3652 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD; 3653 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP; 3654 3655 writel(BARRIER_TEST_PATTERN, (resp_buf + 1)); 3656 mbox_buf = phba->MBslimaddr; 3657 writel(mbox, mbox_buf); 3658 3659 for (i = 0; i < 50; i++) { 3660 if (lpfc_readl((resp_buf + 1), &resp_data)) 3661 return; 3662 if (resp_data != ~(BARRIER_TEST_PATTERN)) 3663 mdelay(1); 3664 else 3665 break; 3666 } 3667 resp_data = 0; 3668 if (lpfc_readl((resp_buf + 1), &resp_data)) 3669 return; 3670 if (resp_data != ~(BARRIER_TEST_PATTERN)) { 3671 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE || 3672 phba->pport->stopped) 3673 goto restore_hc; 3674 else 3675 goto clear_errat; 3676 } 3677 3678 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST; 3679 resp_data = 0; 3680 for (i = 0; i < 500; i++) { 3681 if (lpfc_readl(resp_buf, &resp_data)) 3682 return; 3683 if (resp_data != mbox) 3684 mdelay(1); 3685 else 3686 break; 3687 } 3688 3689 clear_errat: 3690 3691 while (++i < 500) { 3692 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 3693 return; 3694 if (!(ha_copy & HA_ERATT)) 3695 mdelay(1); 3696 else 3697 break; 3698 } 3699 3700 if (readl(phba->HAregaddr) & HA_ERATT) { 3701 writel(HA_ERATT, phba->HAregaddr); 3702 phba->pport->stopped = 1; 3703 } 3704 3705 restore_hc: 3706 phba->link_flag &= ~LS_IGNORE_ERATT; 3707 writel(hc_copy, phba->HCregaddr); 3708 readl(phba->HCregaddr); /* flush */ 3709 } 3710 3711 /** 3712 * lpfc_sli_brdkill - Issue a kill_board mailbox command 3713 * @phba: Pointer to HBA context object. 3714 * 3715 * This function issues a kill_board mailbox command and waits for 3716 * the error attention interrupt. This function is called for stopping 3717 * the firmware processing. The caller is not required to hold any 3718 * locks. This function calls lpfc_hba_down_post function to free 3719 * any pending commands after the kill. The function will return 1 when it 3720 * fails to kill the board else will return 0. 
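 *
 * In outline: error attention is masked, the KILL_BOARD mailbox is issued
 * with MBX_NOWAIT (there is no completion for it), and the HA register is
 * then polled for ERATT for up to about three seconds before the HBA is
 * marked LPFC_HBA_ERROR and lpfc_hba_down_post is called.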
3721 **/ 3722 int 3723 lpfc_sli_brdkill(struct lpfc_hba *phba) 3724 { 3725 struct lpfc_sli *psli; 3726 LPFC_MBOXQ_t *pmb; 3727 uint32_t status; 3728 uint32_t ha_copy; 3729 int retval; 3730 int i = 0; 3731 3732 psli = &phba->sli; 3733 3734 /* Kill HBA */ 3735 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3736 "0329 Kill HBA Data: x%x x%x\n", 3737 phba->pport->port_state, psli->sli_flag); 3738 3739 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3740 if (!pmb) 3741 return 1; 3742 3743 /* Disable the error attention */ 3744 spin_lock_irq(&phba->hbalock); 3745 if (lpfc_readl(phba->HCregaddr, &status)) { 3746 spin_unlock_irq(&phba->hbalock); 3747 mempool_free(pmb, phba->mbox_mem_pool); 3748 return 1; 3749 } 3750 status &= ~HC_ERINT_ENA; 3751 writel(status, phba->HCregaddr); 3752 readl(phba->HCregaddr); /* flush */ 3753 phba->link_flag |= LS_IGNORE_ERATT; 3754 spin_unlock_irq(&phba->hbalock); 3755 3756 lpfc_kill_board(phba, pmb); 3757 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 3758 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 3759 3760 if (retval != MBX_SUCCESS) { 3761 if (retval != MBX_BUSY) 3762 mempool_free(pmb, phba->mbox_mem_pool); 3763 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3764 "2752 KILL_BOARD command failed retval %d\n", 3765 retval); 3766 spin_lock_irq(&phba->hbalock); 3767 phba->link_flag &= ~LS_IGNORE_ERATT; 3768 spin_unlock_irq(&phba->hbalock); 3769 return 1; 3770 } 3771 3772 spin_lock_irq(&phba->hbalock); 3773 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 3774 spin_unlock_irq(&phba->hbalock); 3775 3776 mempool_free(pmb, phba->mbox_mem_pool); 3777 3778 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error 3779 * attention every 100ms for 3 seconds. If we don't get ERATT after 3780 * 3 seconds we still set HBA_ERROR state because the status of the 3781 * board is now undefined. 3782 */ 3783 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 3784 return 1; 3785 while ((i++ < 30) && !(ha_copy & HA_ERATT)) { 3786 mdelay(100); 3787 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 3788 return 1; 3789 } 3790 3791 del_timer_sync(&psli->mbox_tmo); 3792 if (ha_copy & HA_ERATT) { 3793 writel(HA_ERATT, phba->HAregaddr); 3794 phba->pport->stopped = 1; 3795 } 3796 spin_lock_irq(&phba->hbalock); 3797 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 3798 psli->mbox_active = NULL; 3799 phba->link_flag &= ~LS_IGNORE_ERATT; 3800 spin_unlock_irq(&phba->hbalock); 3801 3802 lpfc_hba_down_post(phba); 3803 phba->link_state = LPFC_HBA_ERROR; 3804 3805 return ha_copy & HA_ERATT ? 0 : 1; 3806 } 3807 3808 /** 3809 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA 3810 * @phba: Pointer to HBA context object. 3811 * 3812 * This function resets the HBA by writing HC_INITFF to the control 3813 * register. After the HBA resets, this function resets all the iocb ring 3814 * indices. This function disables PCI layer parity checking during 3815 * the reset. 3816 * This function returns 0 always. 3817 * The caller is not required to hold any locks. 
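 *
 * PCI parity and SERR reporting are masked around the HC_INITFF pulse and
 * restored afterwards, and the per-ring command/response indices are
 * zeroed so the rings start clean after the reset.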
3818 **/ 3819 int 3820 lpfc_sli_brdreset(struct lpfc_hba *phba) 3821 { 3822 struct lpfc_sli *psli; 3823 struct lpfc_sli_ring *pring; 3824 uint16_t cfg_value; 3825 int i; 3826 3827 psli = &phba->sli; 3828 3829 /* Reset HBA */ 3830 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3831 "0325 Reset HBA Data: x%x x%x\n", 3832 phba->pport->port_state, psli->sli_flag); 3833 3834 /* perform board reset */ 3835 phba->fc_eventTag = 0; 3836 phba->link_events = 0; 3837 phba->pport->fc_myDID = 0; 3838 phba->pport->fc_prevDID = 0; 3839 3840 /* Turn off parity checking and serr during the physical reset */ 3841 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); 3842 pci_write_config_word(phba->pcidev, PCI_COMMAND, 3843 (cfg_value & 3844 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); 3845 3846 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA); 3847 3848 /* Now toggle INITFF bit in the Host Control Register */ 3849 writel(HC_INITFF, phba->HCregaddr); 3850 mdelay(1); 3851 readl(phba->HCregaddr); /* flush */ 3852 writel(0, phba->HCregaddr); 3853 readl(phba->HCregaddr); /* flush */ 3854 3855 /* Restore PCI cmd register */ 3856 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value); 3857 3858 /* Initialize relevant SLI info */ 3859 for (i = 0; i < psli->num_rings; i++) { 3860 pring = &psli->ring[i]; 3861 pring->flag = 0; 3862 pring->rspidx = 0; 3863 pring->next_cmdidx = 0; 3864 pring->local_getidx = 0; 3865 pring->cmdidx = 0; 3866 pring->missbufcnt = 0; 3867 } 3868 3869 phba->link_state = LPFC_WARM_START; 3870 return 0; 3871 } 3872 3873 /** 3874 * lpfc_sli4_brdreset - Reset a sli-4 HBA 3875 * @phba: Pointer to HBA context object. 3876 * 3877 * This function resets a SLI4 HBA. This function disables PCI layer parity 3878 * checking during resets the device. The caller is not required to hold 3879 * any locks. 3880 * 3881 * This function returns 0 always. 3882 **/ 3883 int 3884 lpfc_sli4_brdreset(struct lpfc_hba *phba) 3885 { 3886 struct lpfc_sli *psli = &phba->sli; 3887 uint16_t cfg_value; 3888 int rc; 3889 3890 /* Reset HBA */ 3891 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3892 "0295 Reset HBA Data: x%x x%x\n", 3893 phba->pport->port_state, psli->sli_flag); 3894 3895 /* perform board reset */ 3896 phba->fc_eventTag = 0; 3897 phba->link_events = 0; 3898 phba->pport->fc_myDID = 0; 3899 phba->pport->fc_prevDID = 0; 3900 3901 spin_lock_irq(&phba->hbalock); 3902 psli->sli_flag &= ~(LPFC_PROCESS_LA); 3903 phba->fcf.fcf_flag = 0; 3904 spin_unlock_irq(&phba->hbalock); 3905 3906 /* Now physically reset the device */ 3907 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3908 "0389 Performing PCI function reset!\n"); 3909 3910 /* Turn off parity checking and serr during the physical reset */ 3911 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); 3912 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value & 3913 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); 3914 3915 /* Perform FCoE PCI function reset */ 3916 lpfc_sli4_queue_destroy(phba); 3917 rc = lpfc_pci_function_reset(phba); 3918 3919 /* Restore PCI cmd register */ 3920 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value); 3921 3922 return rc; 3923 } 3924 3925 /** 3926 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba 3927 * @phba: Pointer to HBA context object. 3928 * 3929 * This function is called in the SLI initialization code path to 3930 * restart the HBA. The caller is not required to hold any lock. 3931 * This function writes MBX_RESTART mailbox command to the SLIM and 3932 * resets the HBA. 
At the end of the function, it calls lpfc_hba_down_post 3933 * function to free any pending commands. The function enables 3934 * POST only during the first initialization. The function returns zero. 3935 * The function does not guarantee completion of MBX_RESTART mailbox 3936 * command before the return of this function. 3937 **/ 3938 static int 3939 lpfc_sli_brdrestart_s3(struct lpfc_hba *phba) 3940 { 3941 MAILBOX_t *mb; 3942 struct lpfc_sli *psli; 3943 volatile uint32_t word0; 3944 void __iomem *to_slim; 3945 uint32_t hba_aer_enabled; 3946 3947 spin_lock_irq(&phba->hbalock); 3948 3949 /* Take PCIe device Advanced Error Reporting (AER) state */ 3950 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED; 3951 3952 psli = &phba->sli; 3953 3954 /* Restart HBA */ 3955 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3956 "0337 Restart HBA Data: x%x x%x\n", 3957 phba->pport->port_state, psli->sli_flag); 3958 3959 word0 = 0; 3960 mb = (MAILBOX_t *) &word0; 3961 mb->mbxCommand = MBX_RESTART; 3962 mb->mbxHc = 1; 3963 3964 lpfc_reset_barrier(phba); 3965 3966 to_slim = phba->MBslimaddr; 3967 writel(*(uint32_t *) mb, to_slim); 3968 readl(to_slim); /* flush */ 3969 3970 /* Only skip post after fc_ffinit is completed */ 3971 if (phba->pport->port_state) 3972 word0 = 1; /* This is really setting up word1 */ 3973 else 3974 word0 = 0; /* This is really setting up word1 */ 3975 to_slim = phba->MBslimaddr + sizeof (uint32_t); 3976 writel(*(uint32_t *) mb, to_slim); 3977 readl(to_slim); /* flush */ 3978 3979 lpfc_sli_brdreset(phba); 3980 phba->pport->stopped = 0; 3981 phba->link_state = LPFC_INIT_START; 3982 phba->hba_flag = 0; 3983 spin_unlock_irq(&phba->hbalock); 3984 3985 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); 3986 psli->stats_start = get_seconds(); 3987 3988 /* Give the INITFF and Post time to settle. */ 3989 mdelay(100); 3990 3991 /* Reset HBA AER if it was enabled, note hba_flag was reset above */ 3992 if (hba_aer_enabled) 3993 pci_disable_pcie_error_reporting(phba->pcidev); 3994 3995 lpfc_hba_down_post(phba); 3996 3997 return 0; 3998 } 3999 4000 /** 4001 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba 4002 * @phba: Pointer to HBA context object. 4003 * 4004 * This function is called in the SLI initialization code path to restart 4005 * a SLI4 HBA. The caller is not required to hold any lock. 4006 * At the end of the function, it calls lpfc_hba_down_post function to 4007 * free any pending commands. 
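 *
 * Unlike the SLI-3 path, the physical reset is delegated to
 * lpfc_sli4_brdreset (a PCI function reset) and its return code is
 * propagated to the caller.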
4008 **/ 4009 static int 4010 lpfc_sli_brdrestart_s4(struct lpfc_hba *phba) 4011 { 4012 struct lpfc_sli *psli = &phba->sli; 4013 uint32_t hba_aer_enabled; 4014 int rc; 4015 4016 /* Restart HBA */ 4017 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4018 "0296 Restart HBA Data: x%x x%x\n", 4019 phba->pport->port_state, psli->sli_flag); 4020 4021 /* Take PCIe device Advanced Error Reporting (AER) state */ 4022 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED; 4023 4024 rc = lpfc_sli4_brdreset(phba); 4025 4026 spin_lock_irq(&phba->hbalock); 4027 phba->pport->stopped = 0; 4028 phba->link_state = LPFC_INIT_START; 4029 phba->hba_flag = 0; 4030 spin_unlock_irq(&phba->hbalock); 4031 4032 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); 4033 psli->stats_start = get_seconds(); 4034 4035 /* Reset HBA AER if it was enabled, note hba_flag was reset above */ 4036 if (hba_aer_enabled) 4037 pci_disable_pcie_error_reporting(phba->pcidev); 4038 4039 lpfc_hba_down_post(phba); 4040 4041 return rc; 4042 } 4043 4044 /** 4045 * lpfc_sli_brdrestart - Wrapper func for restarting hba 4046 * @phba: Pointer to HBA context object. 4047 * 4048 * This routine wraps the actual SLI3 or SLI4 hba restart routine from the 4049 * API jump table function pointer from the lpfc_hba struct. 4050 **/ 4051 int 4052 lpfc_sli_brdrestart(struct lpfc_hba *phba) 4053 { 4054 return phba->lpfc_sli_brdrestart(phba); 4055 } 4056 4057 /** 4058 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart 4059 * @phba: Pointer to HBA context object. 4060 * 4061 * This function is called after a HBA restart to wait for successful 4062 * restart of the HBA. Successful restart of the HBA is indicated by 4063 * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15 4064 * iteration, the function will restart the HBA again. The function returns 4065 * zero if HBA successfully restarted else returns negative error code. 4066 **/ 4067 static int 4068 lpfc_sli_chipset_init(struct lpfc_hba *phba) 4069 { 4070 uint32_t status, i = 0; 4071 4072 /* Read the HBA Host Status Register */ 4073 if (lpfc_readl(phba->HSregaddr, &status)) 4074 return -EIO; 4075 4076 /* Check status register to see what current state is */ 4077 i = 0; 4078 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) { 4079 4080 /* Check every 10ms for 10 retries, then every 100ms for 90 4081 * retries, then every 1 sec for 50 retires for a total of 4082 * ~60 seconds before reset the board again and check every 4083 * 1 sec for 50 retries. The up to 60 seconds before the 4084 * board ready is required by the Falcon FIPS zeroization 4085 * complete, and any reset the board in between shall cause 4086 * restart of zeroization, further delay the board ready. 
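 *
 * In total the loop below allows roughly 10 * 10ms + 90 * 100ms +
 * 100 * 1s, i.e. a little under two minutes, before giving up at
 * iteration 200, with the single restart issued at iteration 150.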
4087 */ 4088 if (i++ >= 200) { 4089 /* Adapter failed to init, timeout, status reg 4090 <status> */ 4091 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4092 "0436 Adapter failed to init, " 4093 "timeout, status reg x%x, " 4094 "FW Data: A8 x%x AC x%x\n", status, 4095 readl(phba->MBslimaddr + 0xa8), 4096 readl(phba->MBslimaddr + 0xac)); 4097 phba->link_state = LPFC_HBA_ERROR; 4098 return -ETIMEDOUT; 4099 } 4100 4101 /* Check to see if any errors occurred during init */ 4102 if (status & HS_FFERM) { 4103 /* ERROR: During chipset initialization */ 4104 /* Adapter failed to init, chipset, status reg 4105 <status> */ 4106 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4107 "0437 Adapter failed to init, " 4108 "chipset, status reg x%x, " 4109 "FW Data: A8 x%x AC x%x\n", status, 4110 readl(phba->MBslimaddr + 0xa8), 4111 readl(phba->MBslimaddr + 0xac)); 4112 phba->link_state = LPFC_HBA_ERROR; 4113 return -EIO; 4114 } 4115 4116 if (i <= 10) 4117 msleep(10); 4118 else if (i <= 100) 4119 msleep(100); 4120 else 4121 msleep(1000); 4122 4123 if (i == 150) { 4124 /* Do post */ 4125 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 4126 lpfc_sli_brdrestart(phba); 4127 } 4128 /* Read the HBA Host Status Register */ 4129 if (lpfc_readl(phba->HSregaddr, &status)) 4130 return -EIO; 4131 } 4132 4133 /* Check to see if any errors occurred during init */ 4134 if (status & HS_FFERM) { 4135 /* ERROR: During chipset initialization */ 4136 /* Adapter failed to init, chipset, status reg <status> */ 4137 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4138 "0438 Adapter failed to init, chipset, " 4139 "status reg x%x, " 4140 "FW Data: A8 x%x AC x%x\n", status, 4141 readl(phba->MBslimaddr + 0xa8), 4142 readl(phba->MBslimaddr + 0xac)); 4143 phba->link_state = LPFC_HBA_ERROR; 4144 return -EIO; 4145 } 4146 4147 /* Clear all interrupt enable conditions */ 4148 writel(0, phba->HCregaddr); 4149 readl(phba->HCregaddr); /* flush */ 4150 4151 /* setup host attn register */ 4152 writel(0xffffffff, phba->HAregaddr); 4153 readl(phba->HAregaddr); /* flush */ 4154 return 0; 4155 } 4156 4157 /** 4158 * lpfc_sli_hbq_count - Get the number of HBQs to be configured 4159 * 4160 * This function calculates and returns the number of HBQs required to be 4161 * configured. 4162 **/ 4163 int 4164 lpfc_sli_hbq_count(void) 4165 { 4166 return ARRAY_SIZE(lpfc_hbq_defs); 4167 } 4168 4169 /** 4170 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries 4171 * 4172 * This function adds the number of hbq entries in every HBQ to get 4173 * the total number of hbq entries required for the HBA and returns 4174 * the total count. 4175 **/ 4176 static int 4177 lpfc_sli_hbq_entry_count(void) 4178 { 4179 int hbq_count = lpfc_sli_hbq_count(); 4180 int count = 0; 4181 int i; 4182 4183 for (i = 0; i < hbq_count; ++i) 4184 count += lpfc_hbq_defs[i]->entry_count; 4185 return count; 4186 } 4187 4188 /** 4189 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries 4190 * 4191 * This function calculates amount of memory required for all hbq entries 4192 * to be configured and returns the total memory required. 4193 **/ 4194 int 4195 lpfc_sli_hbq_size(void) 4196 { 4197 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry); 4198 } 4199 4200 /** 4201 * lpfc_sli_hbq_setup - configure and initialize HBQs 4202 * @phba: Pointer to HBA context object. 4203 * 4204 * This function is called during the SLI initialization to configure 4205 * all the HBQs and post buffers to the HBQ. The caller is not 4206 * required to hold any locks. 
This function will return zero if successful 4207 * else it will return negative error code. 4208 **/ 4209 static int 4210 lpfc_sli_hbq_setup(struct lpfc_hba *phba) 4211 { 4212 int hbq_count = lpfc_sli_hbq_count(); 4213 LPFC_MBOXQ_t *pmb; 4214 MAILBOX_t *pmbox; 4215 uint32_t hbqno; 4216 uint32_t hbq_entry_index; 4217 4218 /* Get a Mailbox buffer to setup mailbox 4219 * commands for HBA initialization 4220 */ 4221 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4222 4223 if (!pmb) 4224 return -ENOMEM; 4225 4226 pmbox = &pmb->u.mb; 4227 4228 /* Initialize the struct lpfc_sli_hbq structure for each hbq */ 4229 phba->link_state = LPFC_INIT_MBX_CMDS; 4230 phba->hbq_in_use = 1; 4231 4232 hbq_entry_index = 0; 4233 for (hbqno = 0; hbqno < hbq_count; ++hbqno) { 4234 phba->hbqs[hbqno].next_hbqPutIdx = 0; 4235 phba->hbqs[hbqno].hbqPutIdx = 0; 4236 phba->hbqs[hbqno].local_hbqGetIdx = 0; 4237 phba->hbqs[hbqno].entry_count = 4238 lpfc_hbq_defs[hbqno]->entry_count; 4239 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno], 4240 hbq_entry_index, pmb); 4241 hbq_entry_index += phba->hbqs[hbqno].entry_count; 4242 4243 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 4244 /* Adapter failed to init, mbxCmd <cmd> CFG_RING, 4245 mbxStatus <status>, ring <num> */ 4246 4247 lpfc_printf_log(phba, KERN_ERR, 4248 LOG_SLI | LOG_VPORT, 4249 "1805 Adapter failed to init. " 4250 "Data: x%x x%x x%x\n", 4251 pmbox->mbxCommand, 4252 pmbox->mbxStatus, hbqno); 4253 4254 phba->link_state = LPFC_HBA_ERROR; 4255 mempool_free(pmb, phba->mbox_mem_pool); 4256 return -ENXIO; 4257 } 4258 } 4259 phba->hbq_count = hbq_count; 4260 4261 mempool_free(pmb, phba->mbox_mem_pool); 4262 4263 /* Initially populate or replenish the HBQs */ 4264 for (hbqno = 0; hbqno < hbq_count; ++hbqno) 4265 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno); 4266 return 0; 4267 } 4268 4269 /** 4270 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA 4271 * @phba: Pointer to HBA context object. 4272 * 4273 * This function is called during the SLI initialization to configure 4274 * all the HBQs and post buffers to the HBQ. The caller is not 4275 * required to hold any locks. This function will return zero if successful 4276 * else it will return negative error code. 4277 **/ 4278 static int 4279 lpfc_sli4_rb_setup(struct lpfc_hba *phba) 4280 { 4281 phba->hbq_in_use = 1; 4282 phba->hbqs[0].entry_count = lpfc_hbq_defs[0]->entry_count; 4283 phba->hbq_count = 1; 4284 /* Initially populate or replenish the HBQs */ 4285 lpfc_sli_hbqbuf_init_hbqs(phba, 0); 4286 return 0; 4287 } 4288 4289 /** 4290 * lpfc_sli_config_port - Issue config port mailbox command 4291 * @phba: Pointer to HBA context object. 4292 * @sli_mode: sli mode - 2/3 4293 * 4294 * This function is called by the sli intialization code path 4295 * to issue config_port mailbox command. This function restarts the 4296 * HBA firmware and issues a config_port mailbox command to configure 4297 * the SLI interface in the sli mode specified by sli_mode 4298 * variable. The caller is not required to hold any locks. 4299 * The function returns 0 if successful, else returns negative error 4300 * code. 
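 *
 * The restart/CONFIG_PORT sequence is attempted at most twice; a return of
 * -ERESTART from the pre-CONFIG_PORT preparation triggers the second pass,
 * while any other failure ends the loop.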
4301 **/ 4302 int 4303 lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode) 4304 { 4305 LPFC_MBOXQ_t *pmb; 4306 uint32_t resetcount = 0, rc = 0, done = 0; 4307 4308 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4309 if (!pmb) { 4310 phba->link_state = LPFC_HBA_ERROR; 4311 return -ENOMEM; 4312 } 4313 4314 phba->sli_rev = sli_mode; 4315 while (resetcount < 2 && !done) { 4316 spin_lock_irq(&phba->hbalock); 4317 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE; 4318 spin_unlock_irq(&phba->hbalock); 4319 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 4320 lpfc_sli_brdrestart(phba); 4321 rc = lpfc_sli_chipset_init(phba); 4322 if (rc) 4323 break; 4324 4325 spin_lock_irq(&phba->hbalock); 4326 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 4327 spin_unlock_irq(&phba->hbalock); 4328 resetcount++; 4329 4330 /* Call pre CONFIG_PORT mailbox command initialization. A 4331 * value of 0 means the call was successful. Any other 4332 * nonzero value is a failure, but if ERESTART is returned, 4333 * the driver may reset the HBA and try again. 4334 */ 4335 rc = lpfc_config_port_prep(phba); 4336 if (rc == -ERESTART) { 4337 phba->link_state = LPFC_LINK_UNKNOWN; 4338 continue; 4339 } else if (rc) 4340 break; 4341 4342 phba->link_state = LPFC_INIT_MBX_CMDS; 4343 lpfc_config_port(phba, pmb); 4344 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 4345 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED | 4346 LPFC_SLI3_HBQ_ENABLED | 4347 LPFC_SLI3_CRP_ENABLED | 4348 LPFC_SLI3_BG_ENABLED | 4349 LPFC_SLI3_DSS_ENABLED); 4350 if (rc != MBX_SUCCESS) { 4351 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4352 "0442 Adapter failed to init, mbxCmd x%x " 4353 "CONFIG_PORT, mbxStatus x%x Data: x%x\n", 4354 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0); 4355 spin_lock_irq(&phba->hbalock); 4356 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE; 4357 spin_unlock_irq(&phba->hbalock); 4358 rc = -ENXIO; 4359 } else { 4360 /* Allow asynchronous mailbox command to go through */ 4361 spin_lock_irq(&phba->hbalock); 4362 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 4363 spin_unlock_irq(&phba->hbalock); 4364 done = 1; 4365 4366 if ((pmb->u.mb.un.varCfgPort.casabt == 1) && 4367 (pmb->u.mb.un.varCfgPort.gasabt == 0)) 4368 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 4369 "3110 Port did not grant ASABT\n"); 4370 } 4371 } 4372 if (!done) { 4373 rc = -EINVAL; 4374 goto do_prep_failed; 4375 } 4376 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) { 4377 if (!pmb->u.mb.un.varCfgPort.cMA) { 4378 rc = -ENXIO; 4379 goto do_prep_failed; 4380 } 4381 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) { 4382 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED; 4383 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi; 4384 phba->max_vports = (phba->max_vpi > phba->max_vports) ? 4385 phba->max_vpi : phba->max_vports; 4386 4387 } else 4388 phba->max_vpi = 0; 4389 phba->fips_level = 0; 4390 phba->fips_spec_rev = 0; 4391 if (pmb->u.mb.un.varCfgPort.gdss) { 4392 phba->sli3_options |= LPFC_SLI3_DSS_ENABLED; 4393 phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level; 4394 phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev; 4395 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4396 "2850 Security Crypto Active. 
FIPS x%d " 4397 "(Spec Rev: x%d)", 4398 phba->fips_level, phba->fips_spec_rev); 4399 } 4400 if (pmb->u.mb.un.varCfgPort.sec_err) { 4401 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4402 "2856 Config Port Security Crypto " 4403 "Error: x%x ", 4404 pmb->u.mb.un.varCfgPort.sec_err); 4405 } 4406 if (pmb->u.mb.un.varCfgPort.gerbm) 4407 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED; 4408 if (pmb->u.mb.un.varCfgPort.gcrp) 4409 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED; 4410 4411 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get; 4412 phba->port_gp = phba->mbox->us.s3_pgp.port; 4413 4414 if (phba->cfg_enable_bg) { 4415 if (pmb->u.mb.un.varCfgPort.gbg) 4416 phba->sli3_options |= LPFC_SLI3_BG_ENABLED; 4417 else 4418 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4419 "0443 Adapter did not grant " 4420 "BlockGuard\n"); 4421 } 4422 } else { 4423 phba->hbq_get = NULL; 4424 phba->port_gp = phba->mbox->us.s2.port; 4425 phba->max_vpi = 0; 4426 } 4427 do_prep_failed: 4428 mempool_free(pmb, phba->mbox_mem_pool); 4429 return rc; 4430 } 4431 4432 4433 /** 4434 * lpfc_sli_hba_setup - SLI intialization function 4435 * @phba: Pointer to HBA context object. 4436 * 4437 * This function is the main SLI intialization function. This function 4438 * is called by the HBA intialization code, HBA reset code and HBA 4439 * error attention handler code. Caller is not required to hold any 4440 * locks. This function issues config_port mailbox command to configure 4441 * the SLI, setup iocb rings and HBQ rings. In the end the function 4442 * calls the config_port_post function to issue init_link mailbox 4443 * command and to start the discovery. The function will return zero 4444 * if successful, else it will return negative error code. 4445 **/ 4446 int 4447 lpfc_sli_hba_setup(struct lpfc_hba *phba) 4448 { 4449 uint32_t rc; 4450 int mode = 3, i; 4451 int longs; 4452 4453 switch (lpfc_sli_mode) { 4454 case 2: 4455 if (phba->cfg_enable_npiv) { 4456 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 4457 "1824 NPIV enabled: Override lpfc_sli_mode " 4458 "parameter (%d) to auto (0).\n", 4459 lpfc_sli_mode); 4460 break; 4461 } 4462 mode = 2; 4463 break; 4464 case 0: 4465 case 3: 4466 break; 4467 default: 4468 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 4469 "1819 Unrecognized lpfc_sli_mode " 4470 "parameter: %d.\n", lpfc_sli_mode); 4471 4472 break; 4473 } 4474 4475 rc = lpfc_sli_config_port(phba, mode); 4476 4477 if (rc && lpfc_sli_mode == 3) 4478 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 4479 "1820 Unable to select SLI-3. 
" 4480 "Not supported by adapter.\n"); 4481 if (rc && mode != 2) 4482 rc = lpfc_sli_config_port(phba, 2); 4483 if (rc) 4484 goto lpfc_sli_hba_setup_error; 4485 4486 /* Enable PCIe device Advanced Error Reporting (AER) if configured */ 4487 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) { 4488 rc = pci_enable_pcie_error_reporting(phba->pcidev); 4489 if (!rc) { 4490 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4491 "2709 This device supports " 4492 "Advanced Error Reporting (AER)\n"); 4493 spin_lock_irq(&phba->hbalock); 4494 phba->hba_flag |= HBA_AER_ENABLED; 4495 spin_unlock_irq(&phba->hbalock); 4496 } else { 4497 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4498 "2708 This device does not support " 4499 "Advanced Error Reporting (AER)\n"); 4500 phba->cfg_aer_support = 0; 4501 } 4502 } 4503 4504 if (phba->sli_rev == 3) { 4505 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE; 4506 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE; 4507 } else { 4508 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE; 4509 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE; 4510 phba->sli3_options = 0; 4511 } 4512 4513 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4514 "0444 Firmware in SLI %x mode. Max_vpi %d\n", 4515 phba->sli_rev, phba->max_vpi); 4516 rc = lpfc_sli_ring_map(phba); 4517 4518 if (rc) 4519 goto lpfc_sli_hba_setup_error; 4520 4521 /* Initialize VPIs. */ 4522 if (phba->sli_rev == LPFC_SLI_REV3) { 4523 /* 4524 * The VPI bitmask and physical ID array are allocated 4525 * and initialized once only - at driver load. A port 4526 * reset doesn't need to reinitialize this memory. 4527 */ 4528 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) { 4529 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG; 4530 phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long), 4531 GFP_KERNEL); 4532 if (!phba->vpi_bmask) { 4533 rc = -ENOMEM; 4534 goto lpfc_sli_hba_setup_error; 4535 } 4536 4537 phba->vpi_ids = kzalloc( 4538 (phba->max_vpi+1) * sizeof(uint16_t), 4539 GFP_KERNEL); 4540 if (!phba->vpi_ids) { 4541 kfree(phba->vpi_bmask); 4542 rc = -ENOMEM; 4543 goto lpfc_sli_hba_setup_error; 4544 } 4545 for (i = 0; i < phba->max_vpi; i++) 4546 phba->vpi_ids[i] = i; 4547 } 4548 } 4549 4550 /* Init HBQs */ 4551 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 4552 rc = lpfc_sli_hbq_setup(phba); 4553 if (rc) 4554 goto lpfc_sli_hba_setup_error; 4555 } 4556 spin_lock_irq(&phba->hbalock); 4557 phba->sli.sli_flag |= LPFC_PROCESS_LA; 4558 spin_unlock_irq(&phba->hbalock); 4559 4560 rc = lpfc_config_port_post(phba); 4561 if (rc) 4562 goto lpfc_sli_hba_setup_error; 4563 4564 return rc; 4565 4566 lpfc_sli_hba_setup_error: 4567 phba->link_state = LPFC_HBA_ERROR; 4568 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4569 "0445 Firmware initialization failed\n"); 4570 return rc; 4571 } 4572 4573 /** 4574 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region 4575 * @phba: Pointer to HBA context object. 4576 * @mboxq: mailbox pointer. 4577 * This function issue a dump mailbox command to read config region 4578 * 23 and parse the records in the region and populate driver 4579 * data structure. 
4580 **/ 4581 static int 4582 lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba) 4583 { 4584 LPFC_MBOXQ_t *mboxq; 4585 struct lpfc_dmabuf *mp; 4586 struct lpfc_mqe *mqe; 4587 uint32_t data_length; 4588 int rc; 4589 4590 /* Program the default value of vlan_id and fc_map */ 4591 phba->valid_vlan = 0; 4592 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; 4593 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; 4594 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; 4595 4596 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4597 if (!mboxq) 4598 return -ENOMEM; 4599 4600 mqe = &mboxq->u.mqe; 4601 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) { 4602 rc = -ENOMEM; 4603 goto out_free_mboxq; 4604 } 4605 4606 mp = (struct lpfc_dmabuf *) mboxq->context1; 4607 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4608 4609 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 4610 "(%d):2571 Mailbox cmd x%x Status x%x " 4611 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x " 4612 "x%x x%x x%x x%x x%x x%x x%x x%x x%x " 4613 "CQ: x%x x%x x%x x%x\n", 4614 mboxq->vport ? mboxq->vport->vpi : 0, 4615 bf_get(lpfc_mqe_command, mqe), 4616 bf_get(lpfc_mqe_status, mqe), 4617 mqe->un.mb_words[0], mqe->un.mb_words[1], 4618 mqe->un.mb_words[2], mqe->un.mb_words[3], 4619 mqe->un.mb_words[4], mqe->un.mb_words[5], 4620 mqe->un.mb_words[6], mqe->un.mb_words[7], 4621 mqe->un.mb_words[8], mqe->un.mb_words[9], 4622 mqe->un.mb_words[10], mqe->un.mb_words[11], 4623 mqe->un.mb_words[12], mqe->un.mb_words[13], 4624 mqe->un.mb_words[14], mqe->un.mb_words[15], 4625 mqe->un.mb_words[16], mqe->un.mb_words[50], 4626 mboxq->mcqe.word0, 4627 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, 4628 mboxq->mcqe.trailer); 4629 4630 if (rc) { 4631 lpfc_mbuf_free(phba, mp->virt, mp->phys); 4632 kfree(mp); 4633 rc = -EIO; 4634 goto out_free_mboxq; 4635 } 4636 data_length = mqe->un.mb_words[5]; 4637 if (data_length > DMP_RGN23_SIZE) { 4638 lpfc_mbuf_free(phba, mp->virt, mp->phys); 4639 kfree(mp); 4640 rc = -EIO; 4641 goto out_free_mboxq; 4642 } 4643 4644 lpfc_parse_fcoe_conf(phba, mp->virt, data_length); 4645 lpfc_mbuf_free(phba, mp->virt, mp->phys); 4646 kfree(mp); 4647 rc = 0; 4648 4649 out_free_mboxq: 4650 mempool_free(mboxq, phba->mbox_mem_pool); 4651 return rc; 4652 } 4653 4654 /** 4655 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data 4656 * @phba: pointer to lpfc hba data structure. 4657 * @mboxq: pointer to the LPFC_MBOXQ_t structure. 4658 * @vpd: pointer to the memory to hold resulting port vpd data. 4659 * @vpd_size: On input, the number of bytes allocated to @vpd. 4660 * On output, the number of data bytes in @vpd. 4661 * 4662 * This routine executes a READ_REV SLI4 mailbox command. In 4663 * addition, this routine gets the port vpd data. 4664 * 4665 * Return codes 4666 * 0 - successful 4667 * -ENOMEM - could not allocated memory. 4668 **/ 4669 static int 4670 lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, 4671 uint8_t *vpd, uint32_t *vpd_size) 4672 { 4673 int rc = 0; 4674 uint32_t dma_size; 4675 struct lpfc_dmabuf *dmabuf; 4676 struct lpfc_mqe *mqe; 4677 4678 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 4679 if (!dmabuf) 4680 return -ENOMEM; 4681 4682 /* 4683 * Get a DMA buffer for the vpd data resulting from the READ_REV 4684 * mailbox command. 
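 *
 * The buffer is sized from the caller's *vpd_size; after the mailbox
 * completes, *vpd_size is trimmed down to the avail_vpd_len reported by
 * the port so the caller knows how many bytes were actually copied.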
4685 */ 4686 dma_size = *vpd_size; 4687 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 4688 dma_size, 4689 &dmabuf->phys, 4690 GFP_KERNEL); 4691 if (!dmabuf->virt) { 4692 kfree(dmabuf); 4693 return -ENOMEM; 4694 } 4695 memset(dmabuf->virt, 0, dma_size); 4696 4697 /* 4698 * The SLI4 implementation of READ_REV conflicts at word1, 4699 * bits 31:16 and SLI4 adds vpd functionality not present 4700 * in SLI3. This code corrects the conflicts. 4701 */ 4702 lpfc_read_rev(phba, mboxq); 4703 mqe = &mboxq->u.mqe; 4704 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys); 4705 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys); 4706 mqe->un.read_rev.word1 &= 0x0000FFFF; 4707 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1); 4708 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size); 4709 4710 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4711 if (rc) { 4712 dma_free_coherent(&phba->pcidev->dev, dma_size, 4713 dmabuf->virt, dmabuf->phys); 4714 kfree(dmabuf); 4715 return -EIO; 4716 } 4717 4718 /* 4719 * The available vpd length cannot be bigger than the 4720 * DMA buffer passed to the port. Catch the less than 4721 * case and update the caller's size. 4722 */ 4723 if (mqe->un.read_rev.avail_vpd_len < *vpd_size) 4724 *vpd_size = mqe->un.read_rev.avail_vpd_len; 4725 4726 memcpy(vpd, dmabuf->virt, *vpd_size); 4727 4728 dma_free_coherent(&phba->pcidev->dev, dma_size, 4729 dmabuf->virt, dmabuf->phys); 4730 kfree(dmabuf); 4731 return 0; 4732 } 4733 4734 /** 4735 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name 4736 * @phba: pointer to lpfc hba data structure. 4737 * 4738 * This routine retrieves SLI4 device physical port name this PCI function 4739 * is attached to. 4740 * 4741 * Return codes 4742 * 0 - sucessful 4743 * otherwise - failed to retrieve physical port name 4744 **/ 4745 static int 4746 lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba) 4747 { 4748 LPFC_MBOXQ_t *mboxq; 4749 struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr; 4750 struct lpfc_controller_attribute *cntl_attr; 4751 struct lpfc_mbx_get_port_name *get_port_name; 4752 void *virtaddr = NULL; 4753 uint32_t alloclen, reqlen; 4754 uint32_t shdr_status, shdr_add_status; 4755 union lpfc_sli4_cfg_shdr *shdr; 4756 char cport_name = 0; 4757 int rc; 4758 4759 /* We assume nothing at this point */ 4760 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL; 4761 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON; 4762 4763 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4764 if (!mboxq) 4765 return -ENOMEM; 4766 /* obtain link type and link number via READ_CONFIG */ 4767 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL; 4768 lpfc_sli4_read_config(phba); 4769 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL) 4770 goto retrieve_ppname; 4771 4772 /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */ 4773 reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes); 4774 alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 4775 LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen, 4776 LPFC_SLI4_MBX_NEMBED); 4777 if (alloclen < reqlen) { 4778 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4779 "3084 Allocated DMA memory size (%d) is " 4780 "less than the requested DMA memory size " 4781 "(%d)\n", alloclen, reqlen); 4782 rc = -ENOMEM; 4783 goto out_free_mboxq; 4784 } 4785 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4786 virtaddr = mboxq->sge_array->addr[0]; 4787 mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr; 4788 shdr = 
&mbx_cntl_attr->cfg_shdr; 4789 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 4790 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 4791 if (shdr_status || shdr_add_status || rc) { 4792 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 4793 "3085 Mailbox x%x (x%x/x%x) failed, " 4794 "rc:x%x, status:x%x, add_status:x%x\n", 4795 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 4796 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 4797 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 4798 rc, shdr_status, shdr_add_status); 4799 rc = -ENXIO; 4800 goto out_free_mboxq; 4801 } 4802 cntl_attr = &mbx_cntl_attr->cntl_attr; 4803 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL; 4804 phba->sli4_hba.lnk_info.lnk_tp = 4805 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr); 4806 phba->sli4_hba.lnk_info.lnk_no = 4807 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr); 4808 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4809 "3086 lnk_type:%d, lnk_numb:%d\n", 4810 phba->sli4_hba.lnk_info.lnk_tp, 4811 phba->sli4_hba.lnk_info.lnk_no); 4812 4813 retrieve_ppname: 4814 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 4815 LPFC_MBOX_OPCODE_GET_PORT_NAME, 4816 sizeof(struct lpfc_mbx_get_port_name) - 4817 sizeof(struct lpfc_sli4_cfg_mhdr), 4818 LPFC_SLI4_MBX_EMBED); 4819 get_port_name = &mboxq->u.mqe.un.get_port_name; 4820 shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr; 4821 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1); 4822 bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request, 4823 phba->sli4_hba.lnk_info.lnk_tp); 4824 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4825 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 4826 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 4827 if (shdr_status || shdr_add_status || rc) { 4828 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 4829 "3087 Mailbox x%x (x%x/x%x) failed: " 4830 "rc:x%x, status:x%x, add_status:x%x\n", 4831 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 4832 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 4833 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 4834 rc, shdr_status, shdr_add_status); 4835 rc = -ENXIO; 4836 goto out_free_mboxq; 4837 } 4838 switch (phba->sli4_hba.lnk_info.lnk_no) { 4839 case LPFC_LINK_NUMBER_0: 4840 cport_name = bf_get(lpfc_mbx_get_port_name_name0, 4841 &get_port_name->u.response); 4842 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 4843 break; 4844 case LPFC_LINK_NUMBER_1: 4845 cport_name = bf_get(lpfc_mbx_get_port_name_name1, 4846 &get_port_name->u.response); 4847 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 4848 break; 4849 case LPFC_LINK_NUMBER_2: 4850 cport_name = bf_get(lpfc_mbx_get_port_name_name2, 4851 &get_port_name->u.response); 4852 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 4853 break; 4854 case LPFC_LINK_NUMBER_3: 4855 cport_name = bf_get(lpfc_mbx_get_port_name_name3, 4856 &get_port_name->u.response); 4857 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 4858 break; 4859 default: 4860 break; 4861 } 4862 4863 if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) { 4864 phba->Port[0] = cport_name; 4865 phba->Port[1] = '\0'; 4866 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4867 "3091 SLI get port name: %s\n", phba->Port); 4868 } 4869 4870 out_free_mboxq: 4871 if (rc != MBX_TIMEOUT) { 4872 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG) 4873 lpfc_sli4_mbox_cmd_free(phba, mboxq); 4874 else 4875 mempool_free(mboxq, phba->mbox_mem_pool); 4876 } 4877 return rc; 4878 } 4879 4880 /** 4881 * 
lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
4882 * @phba: pointer to lpfc hba data structure.
4883 *
4884 * This routine is called to explicitly arm the SLI4 device's completion and
4885 * event queues.
4886 **/
4887 static void
4888 lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
4889 {
4890 uint8_t fcp_eqidx;
4891 
4892 lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
4893 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
4894 fcp_eqidx = 0;
4895 if (phba->sli4_hba.fcp_cq) {
4896 do
4897 lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
4898 LPFC_QUEUE_REARM);
4899 while (++fcp_eqidx < phba->cfg_fcp_eq_count);
4900 }
4901 lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM);
4902 if (phba->sli4_hba.fp_eq) {
4903 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count;
4904 fcp_eqidx++)
4905 lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx],
4906 LPFC_QUEUE_REARM);
4907 }
4908 }
4909 
4910 /**
4911 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
4912 * @phba: Pointer to HBA context object.
4913 * @type: The resource extent type.
4914 * @extnt_count: buffer to hold port available extent count.
4915 * @extnt_size: buffer to hold element count per extent.
4916 *
4917 * This function calls the port and retrieves the number of available
4918 * extents and their size for a particular extent type.
4919 *
4920 * Returns: 0 if successful. Nonzero otherwise.
4921 **/
4922 int
4923 lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
4924 uint16_t *extnt_count, uint16_t *extnt_size)
4925 {
4926 int rc = 0;
4927 uint32_t length;
4928 uint32_t mbox_tmo;
4929 struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
4930 LPFC_MBOXQ_t *mbox;
4931 
4932 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4933 if (!mbox)
4934 return -ENOMEM;
4935 
4936 /* Find out how many extents are available for this resource type */
4937 length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
4938 sizeof(struct lpfc_sli4_cfg_mhdr));
4939 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
4940 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
4941 length, LPFC_SLI4_MBX_EMBED);
4942 
4943 /* Send an extents count of 0 - the GET doesn't use it.
*/ 4944 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type, 4945 LPFC_SLI4_MBX_EMBED); 4946 if (unlikely(rc)) { 4947 rc = -EIO; 4948 goto err_exit; 4949 } 4950 4951 if (!phba->sli4_hba.intr_enable) 4952 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 4953 else { 4954 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 4955 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 4956 } 4957 if (unlikely(rc)) { 4958 rc = -EIO; 4959 goto err_exit; 4960 } 4961 4962 rsrc_info = &mbox->u.mqe.un.rsrc_extent_info; 4963 if (bf_get(lpfc_mbox_hdr_status, 4964 &rsrc_info->header.cfg_shdr.response)) { 4965 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 4966 "2930 Failed to get resource extents " 4967 "Status 0x%x Add'l Status 0x%x\n", 4968 bf_get(lpfc_mbox_hdr_status, 4969 &rsrc_info->header.cfg_shdr.response), 4970 bf_get(lpfc_mbox_hdr_add_status, 4971 &rsrc_info->header.cfg_shdr.response)); 4972 rc = -EIO; 4973 goto err_exit; 4974 } 4975 4976 *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt, 4977 &rsrc_info->u.rsp); 4978 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size, 4979 &rsrc_info->u.rsp); 4980 4981 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4982 "3162 Retrieved extents type-%d from port: count:%d, " 4983 "size:%d\n", type, *extnt_count, *extnt_size); 4984 4985 err_exit: 4986 mempool_free(mbox, phba->mbox_mem_pool); 4987 return rc; 4988 } 4989 4990 /** 4991 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents. 4992 * @phba: Pointer to HBA context object. 4993 * @type: The extent type to check. 4994 * 4995 * This function reads the current available extents from the port and checks 4996 * if the extent count or extent size has changed since the last access. 4997 * Callers use this routine post port reset to understand if there is a 4998 * extent reprovisioning requirement. 4999 * 5000 * Returns: 5001 * -Error: error indicates problem. 5002 * 1: Extent count or size has changed. 5003 * 0: No changes. 5004 **/ 5005 static int 5006 lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type) 5007 { 5008 uint16_t curr_ext_cnt, rsrc_ext_cnt; 5009 uint16_t size_diff, rsrc_ext_size; 5010 int rc = 0; 5011 struct lpfc_rsrc_blks *rsrc_entry; 5012 struct list_head *rsrc_blk_list = NULL; 5013 5014 size_diff = 0; 5015 curr_ext_cnt = 0; 5016 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type, 5017 &rsrc_ext_cnt, 5018 &rsrc_ext_size); 5019 if (unlikely(rc)) 5020 return -EIO; 5021 5022 switch (type) { 5023 case LPFC_RSC_TYPE_FCOE_RPI: 5024 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list; 5025 break; 5026 case LPFC_RSC_TYPE_FCOE_VPI: 5027 rsrc_blk_list = &phba->lpfc_vpi_blk_list; 5028 break; 5029 case LPFC_RSC_TYPE_FCOE_XRI: 5030 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list; 5031 break; 5032 case LPFC_RSC_TYPE_FCOE_VFI: 5033 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list; 5034 break; 5035 default: 5036 break; 5037 } 5038 5039 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) { 5040 curr_ext_cnt++; 5041 if (rsrc_entry->rsrc_size != rsrc_ext_size) 5042 size_diff++; 5043 } 5044 5045 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0) 5046 rc = 1; 5047 5048 return rc; 5049 } 5050 5051 /** 5052 * lpfc_sli4_cfg_post_extnts - 5053 * @phba: Pointer to HBA context object. 5054 * @extnt_cnt - number of available extents. 5055 * @type - the extent type (rpi, xri, vfi, vpi). 5056 * @emb - buffer to hold either MBX_EMBED or MBX_NEMBED operation. 5057 * @mbox - pointer to the caller's allocated mailbox structure. 
5058 * 5059 * This function executes the extents allocation request. It also 5060 * takes care of the amount of memory needed to allocate or get the 5061 * allocated extents. It is the caller's responsibility to evaluate 5062 * the response. 5063 * 5064 * Returns: 5065 * -Error: Error value describes the condition found. 5066 * 0: if successful 5067 **/ 5068 static int 5069 lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt, 5070 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox) 5071 { 5072 int rc = 0; 5073 uint32_t req_len; 5074 uint32_t emb_len; 5075 uint32_t alloc_len, mbox_tmo; 5076 5077 /* Calculate the total requested length of the dma memory */ 5078 req_len = extnt_cnt * sizeof(uint16_t); 5079 5080 /* 5081 * Calculate the size of an embedded mailbox. The uint32_t 5082 * accounts for extents-specific word. 5083 */ 5084 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) - 5085 sizeof(uint32_t); 5086 5087 /* 5088 * Presume the allocation and response will fit into an embedded 5089 * mailbox. If not true, reconfigure to a non-embedded mailbox. 5090 */ 5091 *emb = LPFC_SLI4_MBX_EMBED; 5092 if (req_len > emb_len) { 5093 req_len = extnt_cnt * sizeof(uint16_t) + 5094 sizeof(union lpfc_sli4_cfg_shdr) + 5095 sizeof(uint32_t); 5096 *emb = LPFC_SLI4_MBX_NEMBED; 5097 } 5098 5099 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 5100 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT, 5101 req_len, *emb); 5102 if (alloc_len < req_len) { 5103 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5104 "2982 Allocated DMA memory size (x%x) is " 5105 "less than the requested DMA memory " 5106 "size (x%x)\n", alloc_len, req_len); 5107 return -ENOMEM; 5108 } 5109 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb); 5110 if (unlikely(rc)) 5111 return -EIO; 5112 5113 if (!phba->sli4_hba.intr_enable) 5114 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 5115 else { 5116 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 5117 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 5118 } 5119 5120 if (unlikely(rc)) 5121 rc = -EIO; 5122 return rc; 5123 } 5124 5125 /** 5126 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent. 5127 * @phba: Pointer to HBA context object. 5128 * @type: The resource extent type to allocate. 5129 * 5130 * This function allocates the number of elements for the specified 5131 * resource type. 
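 *
 * The port returns the starting ID of each allocated extent packed two per
 * response word; the loop at the bottom of this function unpacks them, for
 * example (illustrative):
 *
 *	start0 = bf_get(lpfc_mbx_rsrc_id_word4_0, &id_array[k]);
 *	start1 = bf_get(lpfc_mbx_rsrc_id_word4_1, &id_array[k]);
 *
 * and then expands each extent into rsrc_size consecutive IDs in the
 * per-type ids[] array, adding one lpfc_rsrc_blks node per extent.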
5132 **/ 5133 static int 5134 lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type) 5135 { 5136 bool emb = false; 5137 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size; 5138 uint16_t rsrc_id, rsrc_start, j, k; 5139 uint16_t *ids; 5140 int i, rc; 5141 unsigned long longs; 5142 unsigned long *bmask; 5143 struct lpfc_rsrc_blks *rsrc_blks; 5144 LPFC_MBOXQ_t *mbox; 5145 uint32_t length; 5146 struct lpfc_id_range *id_array = NULL; 5147 void *virtaddr = NULL; 5148 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc; 5149 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext; 5150 struct list_head *ext_blk_list; 5151 5152 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type, 5153 &rsrc_cnt, 5154 &rsrc_size); 5155 if (unlikely(rc)) 5156 return -EIO; 5157 5158 if ((rsrc_cnt == 0) || (rsrc_size == 0)) { 5159 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 5160 "3009 No available Resource Extents " 5161 "for resource type 0x%x: Count: 0x%x, " 5162 "Size 0x%x\n", type, rsrc_cnt, 5163 rsrc_size); 5164 return -ENOMEM; 5165 } 5166 5167 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI, 5168 "2903 Post resource extents type-0x%x: " 5169 "count:%d, size %d\n", type, rsrc_cnt, rsrc_size); 5170 5171 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5172 if (!mbox) 5173 return -ENOMEM; 5174 5175 rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox); 5176 if (unlikely(rc)) { 5177 rc = -EIO; 5178 goto err_exit; 5179 } 5180 5181 /* 5182 * Figure out where the response is located. Then get local pointers 5183 * to the response data. The port does not guarantee to respond to 5184 * all extents counts request so update the local variable with the 5185 * allocated count from the port. 5186 */ 5187 if (emb == LPFC_SLI4_MBX_EMBED) { 5188 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents; 5189 id_array = &rsrc_ext->u.rsp.id[0]; 5190 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp); 5191 } else { 5192 virtaddr = mbox->sge_array->addr[0]; 5193 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr; 5194 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc); 5195 id_array = &n_rsrc->id; 5196 } 5197 5198 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG; 5199 rsrc_id_cnt = rsrc_cnt * rsrc_size; 5200 5201 /* 5202 * Based on the resource size and count, correct the base and max 5203 * resource values. 5204 */ 5205 length = sizeof(struct lpfc_rsrc_blks); 5206 switch (type) { 5207 case LPFC_RSC_TYPE_FCOE_RPI: 5208 phba->sli4_hba.rpi_bmask = kzalloc(longs * 5209 sizeof(unsigned long), 5210 GFP_KERNEL); 5211 if (unlikely(!phba->sli4_hba.rpi_bmask)) { 5212 rc = -ENOMEM; 5213 goto err_exit; 5214 } 5215 phba->sli4_hba.rpi_ids = kzalloc(rsrc_id_cnt * 5216 sizeof(uint16_t), 5217 GFP_KERNEL); 5218 if (unlikely(!phba->sli4_hba.rpi_ids)) { 5219 kfree(phba->sli4_hba.rpi_bmask); 5220 rc = -ENOMEM; 5221 goto err_exit; 5222 } 5223 5224 /* 5225 * The next_rpi was initialized with the maximum available 5226 * count but the port may allocate a smaller number. Catch 5227 * that case and update the next_rpi. 5228 */ 5229 phba->sli4_hba.next_rpi = rsrc_id_cnt; 5230 5231 /* Initialize local ptrs for common extent processing later. 
*/ 5232 bmask = phba->sli4_hba.rpi_bmask; 5233 ids = phba->sli4_hba.rpi_ids; 5234 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list; 5235 break; 5236 case LPFC_RSC_TYPE_FCOE_VPI: 5237 phba->vpi_bmask = kzalloc(longs * 5238 sizeof(unsigned long), 5239 GFP_KERNEL); 5240 if (unlikely(!phba->vpi_bmask)) { 5241 rc = -ENOMEM; 5242 goto err_exit; 5243 } 5244 phba->vpi_ids = kzalloc(rsrc_id_cnt * 5245 sizeof(uint16_t), 5246 GFP_KERNEL); 5247 if (unlikely(!phba->vpi_ids)) { 5248 kfree(phba->vpi_bmask); 5249 rc = -ENOMEM; 5250 goto err_exit; 5251 } 5252 5253 /* Initialize local ptrs for common extent processing later. */ 5254 bmask = phba->vpi_bmask; 5255 ids = phba->vpi_ids; 5256 ext_blk_list = &phba->lpfc_vpi_blk_list; 5257 break; 5258 case LPFC_RSC_TYPE_FCOE_XRI: 5259 phba->sli4_hba.xri_bmask = kzalloc(longs * 5260 sizeof(unsigned long), 5261 GFP_KERNEL); 5262 if (unlikely(!phba->sli4_hba.xri_bmask)) { 5263 rc = -ENOMEM; 5264 goto err_exit; 5265 } 5266 phba->sli4_hba.max_cfg_param.xri_used = 0; 5267 phba->sli4_hba.xri_ids = kzalloc(rsrc_id_cnt * 5268 sizeof(uint16_t), 5269 GFP_KERNEL); 5270 if (unlikely(!phba->sli4_hba.xri_ids)) { 5271 kfree(phba->sli4_hba.xri_bmask); 5272 rc = -ENOMEM; 5273 goto err_exit; 5274 } 5275 5276 /* Initialize local ptrs for common extent processing later. */ 5277 bmask = phba->sli4_hba.xri_bmask; 5278 ids = phba->sli4_hba.xri_ids; 5279 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list; 5280 break; 5281 case LPFC_RSC_TYPE_FCOE_VFI: 5282 phba->sli4_hba.vfi_bmask = kzalloc(longs * 5283 sizeof(unsigned long), 5284 GFP_KERNEL); 5285 if (unlikely(!phba->sli4_hba.vfi_bmask)) { 5286 rc = -ENOMEM; 5287 goto err_exit; 5288 } 5289 phba->sli4_hba.vfi_ids = kzalloc(rsrc_id_cnt * 5290 sizeof(uint16_t), 5291 GFP_KERNEL); 5292 if (unlikely(!phba->sli4_hba.vfi_ids)) { 5293 kfree(phba->sli4_hba.vfi_bmask); 5294 rc = -ENOMEM; 5295 goto err_exit; 5296 } 5297 5298 /* Initialize local ptrs for common extent processing later. */ 5299 bmask = phba->sli4_hba.vfi_bmask; 5300 ids = phba->sli4_hba.vfi_ids; 5301 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list; 5302 break; 5303 default: 5304 /* Unsupported Opcode. Fail call. */ 5305 id_array = NULL; 5306 bmask = NULL; 5307 ids = NULL; 5308 ext_blk_list = NULL; 5309 goto err_exit; 5310 } 5311 5312 /* 5313 * Complete initializing the extent configuration with the 5314 * allocated ids assigned to this function. The bitmask serves 5315 * as an index into the array and manages the available ids. The 5316 * array just stores the ids communicated to the port via the wqes. 5317 */ 5318 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) { 5319 if ((i % 2) == 0) 5320 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0, 5321 &id_array[k]); 5322 else 5323 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1, 5324 &id_array[k]); 5325 5326 rsrc_blks = kzalloc(length, GFP_KERNEL); 5327 if (unlikely(!rsrc_blks)) { 5328 rc = -ENOMEM; 5329 kfree(bmask); 5330 kfree(ids); 5331 goto err_exit; 5332 } 5333 rsrc_blks->rsrc_start = rsrc_id; 5334 rsrc_blks->rsrc_size = rsrc_size; 5335 list_add_tail(&rsrc_blks->list, ext_blk_list); 5336 rsrc_start = rsrc_id; 5337 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) 5338 phba->sli4_hba.scsi_xri_start = rsrc_start + 5339 lpfc_sli4_get_els_iocb_cnt(phba); 5340 5341 while (rsrc_id < (rsrc_start + rsrc_size)) { 5342 ids[j] = rsrc_id; 5343 rsrc_id++; 5344 j++; 5345 } 5346 /* Entire word processed. 
Get next word.*/ 5347 if ((i % 2) == 1) 5348 k++; 5349 } 5350 err_exit: 5351 lpfc_sli4_mbox_cmd_free(phba, mbox); 5352 return rc; 5353 } 5354 5355 /** 5356 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent. 5357 * @phba: Pointer to HBA context object. 5358 * @type: the extent's type. 5359 * 5360 * This function deallocates all extents of a particular resource type. 5361 * SLI4 does not allow for deallocating a particular extent range. It 5362 * is the caller's responsibility to release all kernel memory resources. 5363 **/ 5364 static int 5365 lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type) 5366 { 5367 int rc; 5368 uint32_t length, mbox_tmo = 0; 5369 LPFC_MBOXQ_t *mbox; 5370 struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc; 5371 struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next; 5372 5373 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5374 if (!mbox) 5375 return -ENOMEM; 5376 5377 /* 5378 * This function sends an embedded mailbox because it only sends the 5379 * the resource type. All extents of this type are released by the 5380 * port. 5381 */ 5382 length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) - 5383 sizeof(struct lpfc_sli4_cfg_mhdr)); 5384 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 5385 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT, 5386 length, LPFC_SLI4_MBX_EMBED); 5387 5388 /* Send an extents count of 0 - the dealloc doesn't use it. */ 5389 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type, 5390 LPFC_SLI4_MBX_EMBED); 5391 if (unlikely(rc)) { 5392 rc = -EIO; 5393 goto out_free_mbox; 5394 } 5395 if (!phba->sli4_hba.intr_enable) 5396 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 5397 else { 5398 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 5399 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 5400 } 5401 if (unlikely(rc)) { 5402 rc = -EIO; 5403 goto out_free_mbox; 5404 } 5405 5406 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents; 5407 if (bf_get(lpfc_mbox_hdr_status, 5408 &dealloc_rsrc->header.cfg_shdr.response)) { 5409 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 5410 "2919 Failed to release resource extents " 5411 "for type %d - Status 0x%x Add'l Status 0x%x. " 5412 "Resource memory not released.\n", 5413 type, 5414 bf_get(lpfc_mbox_hdr_status, 5415 &dealloc_rsrc->header.cfg_shdr.response), 5416 bf_get(lpfc_mbox_hdr_add_status, 5417 &dealloc_rsrc->header.cfg_shdr.response)); 5418 rc = -EIO; 5419 goto out_free_mbox; 5420 } 5421 5422 /* Release kernel memory resources for the specific type. 
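The port has already released the extents via the mailbox command above, so only the driver-side bookkeeping (bitmask, id array and the per-type extent block list) is freed here; the RPI bitmask and id array are handled earlier, as noted in the RPI case below.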
*/ 5423 switch (type) { 5424 case LPFC_RSC_TYPE_FCOE_VPI: 5425 kfree(phba->vpi_bmask); 5426 kfree(phba->vpi_ids); 5427 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 5428 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 5429 &phba->lpfc_vpi_blk_list, list) { 5430 list_del_init(&rsrc_blk->list); 5431 kfree(rsrc_blk); 5432 } 5433 break; 5434 case LPFC_RSC_TYPE_FCOE_XRI: 5435 kfree(phba->sli4_hba.xri_bmask); 5436 kfree(phba->sli4_hba.xri_ids); 5437 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 5438 &phba->sli4_hba.lpfc_xri_blk_list, list) { 5439 list_del_init(&rsrc_blk->list); 5440 kfree(rsrc_blk); 5441 } 5442 break; 5443 case LPFC_RSC_TYPE_FCOE_VFI: 5444 kfree(phba->sli4_hba.vfi_bmask); 5445 kfree(phba->sli4_hba.vfi_ids); 5446 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 5447 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 5448 &phba->sli4_hba.lpfc_vfi_blk_list, list) { 5449 list_del_init(&rsrc_blk->list); 5450 kfree(rsrc_blk); 5451 } 5452 break; 5453 case LPFC_RSC_TYPE_FCOE_RPI: 5454 /* RPI bitmask and physical id array are cleaned up earlier. */ 5455 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 5456 &phba->sli4_hba.lpfc_rpi_blk_list, list) { 5457 list_del_init(&rsrc_blk->list); 5458 kfree(rsrc_blk); 5459 } 5460 break; 5461 default: 5462 break; 5463 } 5464 5465 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 5466 5467 out_free_mbox: 5468 mempool_free(mbox, phba->mbox_mem_pool); 5469 return rc; 5470 } 5471 5472 /** 5473 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents. 5474 * @phba: Pointer to HBA context object. 5475 * 5476 * This function allocates all SLI4 resource identifiers. 5477 **/ 5478 int 5479 lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba) 5480 { 5481 int i, rc, error = 0; 5482 uint16_t count, base; 5483 unsigned long longs; 5484 5485 if (!phba->sli4_hba.rpi_hdrs_in_use) 5486 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi; 5487 if (phba->sli4_hba.extents_in_use) { 5488 /* 5489 * The port supports resource extents. The XRI, VPI, VFI, RPI 5490 * resource extent count must be read and allocated before 5491 * provisioning the resource id arrays. 5492 */ 5493 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) == 5494 LPFC_IDX_RSRC_RDY) { 5495 /* 5496 * Extent-based resources are set - the driver could 5497 * be in a port reset. Figure out if any corrective 5498 * actions need to be taken. 5499 */ 5500 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 5501 LPFC_RSC_TYPE_FCOE_VFI); 5502 if (rc != 0) 5503 error++; 5504 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 5505 LPFC_RSC_TYPE_FCOE_VPI); 5506 if (rc != 0) 5507 error++; 5508 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 5509 LPFC_RSC_TYPE_FCOE_XRI); 5510 if (rc != 0) 5511 error++; 5512 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 5513 LPFC_RSC_TYPE_FCOE_RPI); 5514 if (rc != 0) 5515 error++; 5516 5517 /* 5518 * It's possible that the number of resources 5519 * provided to this port instance changed between 5520 * resets. Detect this condition and reallocate 5521 * resources. Otherwise, there is no action. 5522 */ 5523 if (error) { 5524 lpfc_printf_log(phba, KERN_INFO, 5525 LOG_MBOX | LOG_INIT, 5526 "2931 Detected extent resource " 5527 "change. 
Reallocating all " 5528 "extents.\n"); 5529 rc = lpfc_sli4_dealloc_extent(phba, 5530 LPFC_RSC_TYPE_FCOE_VFI); 5531 rc = lpfc_sli4_dealloc_extent(phba, 5532 LPFC_RSC_TYPE_FCOE_VPI); 5533 rc = lpfc_sli4_dealloc_extent(phba, 5534 LPFC_RSC_TYPE_FCOE_XRI); 5535 rc = lpfc_sli4_dealloc_extent(phba, 5536 LPFC_RSC_TYPE_FCOE_RPI); 5537 } else 5538 return 0; 5539 } 5540 5541 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI); 5542 if (unlikely(rc)) 5543 goto err_exit; 5544 5545 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI); 5546 if (unlikely(rc)) 5547 goto err_exit; 5548 5549 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI); 5550 if (unlikely(rc)) 5551 goto err_exit; 5552 5553 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI); 5554 if (unlikely(rc)) 5555 goto err_exit; 5556 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 5557 LPFC_IDX_RSRC_RDY); 5558 return rc; 5559 } else { 5560 /* 5561 * The port does not support resource extents. The XRI, VPI, 5562 * VFI, RPI resource ids were determined from READ_CONFIG. 5563 * Just allocate the bitmasks and provision the resource id 5564 * arrays. If a port reset is active, the resources don't 5565 * need any action - just exit. 5566 */ 5567 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) == 5568 LPFC_IDX_RSRC_RDY) { 5569 lpfc_sli4_dealloc_resource_identifiers(phba); 5570 lpfc_sli4_remove_rpis(phba); 5571 } 5572 /* RPIs. */ 5573 count = phba->sli4_hba.max_cfg_param.max_rpi; 5574 base = phba->sli4_hba.max_cfg_param.rpi_base; 5575 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 5576 phba->sli4_hba.rpi_bmask = kzalloc(longs * 5577 sizeof(unsigned long), 5578 GFP_KERNEL); 5579 if (unlikely(!phba->sli4_hba.rpi_bmask)) { 5580 rc = -ENOMEM; 5581 goto err_exit; 5582 } 5583 phba->sli4_hba.rpi_ids = kzalloc(count * 5584 sizeof(uint16_t), 5585 GFP_KERNEL); 5586 if (unlikely(!phba->sli4_hba.rpi_ids)) { 5587 rc = -ENOMEM; 5588 goto free_rpi_bmask; 5589 } 5590 5591 for (i = 0; i < count; i++) 5592 phba->sli4_hba.rpi_ids[i] = base + i; 5593 5594 /* VPIs. */ 5595 count = phba->sli4_hba.max_cfg_param.max_vpi; 5596 base = phba->sli4_hba.max_cfg_param.vpi_base; 5597 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 5598 phba->vpi_bmask = kzalloc(longs * 5599 sizeof(unsigned long), 5600 GFP_KERNEL); 5601 if (unlikely(!phba->vpi_bmask)) { 5602 rc = -ENOMEM; 5603 goto free_rpi_ids; 5604 } 5605 phba->vpi_ids = kzalloc(count * 5606 sizeof(uint16_t), 5607 GFP_KERNEL); 5608 if (unlikely(!phba->vpi_ids)) { 5609 rc = -ENOMEM; 5610 goto free_vpi_bmask; 5611 } 5612 5613 for (i = 0; i < count; i++) 5614 phba->vpi_ids[i] = base + i; 5615 5616 /* XRIs. */ 5617 count = phba->sli4_hba.max_cfg_param.max_xri; 5618 base = phba->sli4_hba.max_cfg_param.xri_base; 5619 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 5620 phba->sli4_hba.xri_bmask = kzalloc(longs * 5621 sizeof(unsigned long), 5622 GFP_KERNEL); 5623 if (unlikely(!phba->sli4_hba.xri_bmask)) { 5624 rc = -ENOMEM; 5625 goto free_vpi_ids; 5626 } 5627 phba->sli4_hba.max_cfg_param.xri_used = 0; 5628 phba->sli4_hba.xri_ids = kzalloc(count * 5629 sizeof(uint16_t), 5630 GFP_KERNEL); 5631 if (unlikely(!phba->sli4_hba.xri_ids)) { 5632 rc = -ENOMEM; 5633 goto free_xri_bmask; 5634 } 5635 5636 for (i = 0; i < count; i++) 5637 phba->sli4_hba.xri_ids[i] = base + i; 5638 5639 /* VFIs. 
*/
5640 count = phba->sli4_hba.max_cfg_param.max_vfi;
5641 base = phba->sli4_hba.max_cfg_param.vfi_base;
5642 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5643 phba->sli4_hba.vfi_bmask = kzalloc(longs *
5644 sizeof(unsigned long),
5645 GFP_KERNEL);
5646 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
5647 rc = -ENOMEM;
5648 goto free_xri_ids;
5649 }
5650 phba->sli4_hba.vfi_ids = kzalloc(count *
5651 sizeof(uint16_t),
5652 GFP_KERNEL);
5653 if (unlikely(!phba->sli4_hba.vfi_ids)) {
5654 rc = -ENOMEM;
5655 goto free_vfi_bmask;
5656 }
5657
5658 for (i = 0; i < count; i++)
5659 phba->sli4_hba.vfi_ids[i] = base + i;
5660
5661 /*
5662 * Mark all resources ready. An HBA reset doesn't need
5663 * to reset the initialization.
5664 */
5665 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
5666 LPFC_IDX_RSRC_RDY);
5667 return 0;
5668 }
5669
5670 free_vfi_bmask:
5671 kfree(phba->sli4_hba.vfi_bmask);
5672 free_xri_ids:
5673 kfree(phba->sli4_hba.xri_ids);
5674 free_xri_bmask:
5675 kfree(phba->sli4_hba.xri_bmask);
5676 free_vpi_ids:
5677 kfree(phba->vpi_ids);
5678 free_vpi_bmask:
5679 kfree(phba->vpi_bmask);
5680 free_rpi_ids:
5681 kfree(phba->sli4_hba.rpi_ids);
5682 free_rpi_bmask:
5683 kfree(phba->sli4_hba.rpi_bmask);
5684 err_exit:
5685 return rc;
5686 }
5687
5688 /**
5689 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
5690 * @phba: Pointer to HBA context object.
5691 *
5692 * This function deallocates all SLI4 resource identifiers set up by the
5693 * driver, releasing either the extent-based or the READ_CONFIG based ids.
5694 **/
5695 int
5696 lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
5697 {
5698 if (phba->sli4_hba.extents_in_use) {
5699 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
5700 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
5701 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
5702 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
5703 } else {
5704 kfree(phba->vpi_bmask);
5705 kfree(phba->vpi_ids);
5706 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5707 kfree(phba->sli4_hba.xri_bmask);
5708 kfree(phba->sli4_hba.xri_ids);
5709 kfree(phba->sli4_hba.vfi_bmask);
5710 kfree(phba->sli4_hba.vfi_ids);
5711 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5712 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5713 }
5714
5715 return 0;
5716 }
5717
5718 /**
5719 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
5720 * @phba: Pointer to HBA context object.
5721 * @type: The resource extent type.
5722 * @extnt_cnt: buffer to hold port extent count response.
5723 * @extnt_size: buffer to hold port extent size response.
5724 *
5725 * This function calls the port to read the host allocated extents
5726 * for a particular type.
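 *
 * A hypothetical caller sketch (only the function name, @phba and the
 * LPFC_RSC_TYPE_FCOE_XRI constant are taken from this file; the local
 * variable names are illustrative):
 *   uint16_t ext_cnt = 0, ext_size = 0;
 *   if (!lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_XRI,
 *                                       &ext_cnt, &ext_size))
 *           total_xri_ids = ext_cnt * ext_size;
 *
 * Returns 0 on success; otherwise a negative error code (-EIO or -ENOMEM).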
5727 **/ 5728 int 5729 lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type, 5730 uint16_t *extnt_cnt, uint16_t *extnt_size) 5731 { 5732 bool emb; 5733 int rc = 0; 5734 uint16_t curr_blks = 0; 5735 uint32_t req_len, emb_len; 5736 uint32_t alloc_len, mbox_tmo; 5737 struct list_head *blk_list_head; 5738 struct lpfc_rsrc_blks *rsrc_blk; 5739 LPFC_MBOXQ_t *mbox; 5740 void *virtaddr = NULL; 5741 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc; 5742 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext; 5743 union lpfc_sli4_cfg_shdr *shdr; 5744 5745 switch (type) { 5746 case LPFC_RSC_TYPE_FCOE_VPI: 5747 blk_list_head = &phba->lpfc_vpi_blk_list; 5748 break; 5749 case LPFC_RSC_TYPE_FCOE_XRI: 5750 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list; 5751 break; 5752 case LPFC_RSC_TYPE_FCOE_VFI: 5753 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list; 5754 break; 5755 case LPFC_RSC_TYPE_FCOE_RPI: 5756 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list; 5757 break; 5758 default: 5759 return -EIO; 5760 } 5761 5762 /* Count the number of extents currently allocatd for this type. */ 5763 list_for_each_entry(rsrc_blk, blk_list_head, list) { 5764 if (curr_blks == 0) { 5765 /* 5766 * The GET_ALLOCATED mailbox does not return the size, 5767 * just the count. The size should be just the size 5768 * stored in the current allocated block and all sizes 5769 * for an extent type are the same so set the return 5770 * value now. 5771 */ 5772 *extnt_size = rsrc_blk->rsrc_size; 5773 } 5774 curr_blks++; 5775 } 5776 5777 /* Calculate the total requested length of the dma memory. */ 5778 req_len = curr_blks * sizeof(uint16_t); 5779 5780 /* 5781 * Calculate the size of an embedded mailbox. The uint32_t 5782 * accounts for extents-specific word. 5783 */ 5784 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) - 5785 sizeof(uint32_t); 5786 5787 /* 5788 * Presume the allocation and response will fit into an embedded 5789 * mailbox. If not true, reconfigure to a non-embedded mailbox. 5790 */ 5791 emb = LPFC_SLI4_MBX_EMBED; 5792 req_len = emb_len; 5793 if (req_len > emb_len) { 5794 req_len = curr_blks * sizeof(uint16_t) + 5795 sizeof(union lpfc_sli4_cfg_shdr) + 5796 sizeof(uint32_t); 5797 emb = LPFC_SLI4_MBX_NEMBED; 5798 } 5799 5800 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5801 if (!mbox) 5802 return -ENOMEM; 5803 memset(mbox, 0, sizeof(LPFC_MBOXQ_t)); 5804 5805 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 5806 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT, 5807 req_len, emb); 5808 if (alloc_len < req_len) { 5809 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5810 "2983 Allocated DMA memory size (x%x) is " 5811 "less than the requested DMA memory " 5812 "size (x%x)\n", alloc_len, req_len); 5813 rc = -ENOMEM; 5814 goto err_exit; 5815 } 5816 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb); 5817 if (unlikely(rc)) { 5818 rc = -EIO; 5819 goto err_exit; 5820 } 5821 5822 if (!phba->sli4_hba.intr_enable) 5823 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 5824 else { 5825 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 5826 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 5827 } 5828 5829 if (unlikely(rc)) { 5830 rc = -EIO; 5831 goto err_exit; 5832 } 5833 5834 /* 5835 * Figure out where the response is located. Then get local pointers 5836 * to the response data. The port does not guarantee to respond to 5837 * all extents counts request so update the local variable with the 5838 * allocated count from the port. 
5839 */ 5840 if (emb == LPFC_SLI4_MBX_EMBED) { 5841 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents; 5842 shdr = &rsrc_ext->header.cfg_shdr; 5843 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp); 5844 } else { 5845 virtaddr = mbox->sge_array->addr[0]; 5846 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr; 5847 shdr = &n_rsrc->cfg_shdr; 5848 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc); 5849 } 5850 5851 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) { 5852 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 5853 "2984 Failed to read allocated resources " 5854 "for type %d - Status 0x%x Add'l Status 0x%x.\n", 5855 type, 5856 bf_get(lpfc_mbox_hdr_status, &shdr->response), 5857 bf_get(lpfc_mbox_hdr_add_status, &shdr->response)); 5858 rc = -EIO; 5859 goto err_exit; 5860 } 5861 err_exit: 5862 lpfc_sli4_mbox_cmd_free(phba, mbox); 5863 return rc; 5864 } 5865 5866 /** 5867 * lpfc_sli4_repost_els_sgl_list - Repsot the els buffers sgl pages as block 5868 * @phba: pointer to lpfc hba data structure. 5869 * 5870 * This routine walks the list of els buffers that have been allocated and 5871 * repost them to the port by using SGL block post. This is needed after a 5872 * pci_function_reset/warm_start or start. It attempts to construct blocks 5873 * of els buffer sgls which contains contiguous xris and uses the non-embedded 5874 * SGL block post mailbox commands to post them to the port. For single els 5875 * buffer sgl with non-contiguous xri, if any, it shall use embedded SGL post 5876 * mailbox command for posting. 5877 * 5878 * Returns: 0 = success, non-zero failure. 5879 **/ 5880 static int 5881 lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba) 5882 { 5883 struct lpfc_sglq *sglq_entry = NULL; 5884 struct lpfc_sglq *sglq_entry_next = NULL; 5885 struct lpfc_sglq *sglq_entry_first = NULL; 5886 int status, post_cnt = 0, num_posted = 0, block_cnt = 0; 5887 int last_xritag = NO_XRI; 5888 LIST_HEAD(prep_sgl_list); 5889 LIST_HEAD(blck_sgl_list); 5890 LIST_HEAD(allc_sgl_list); 5891 LIST_HEAD(post_sgl_list); 5892 LIST_HEAD(free_sgl_list); 5893 5894 spin_lock(&phba->hbalock); 5895 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &allc_sgl_list); 5896 spin_unlock(&phba->hbalock); 5897 5898 list_for_each_entry_safe(sglq_entry, sglq_entry_next, 5899 &allc_sgl_list, list) { 5900 list_del_init(&sglq_entry->list); 5901 block_cnt++; 5902 if ((last_xritag != NO_XRI) && 5903 (sglq_entry->sli4_xritag != last_xritag + 1)) { 5904 /* a hole in xri block, form a sgl posting block */ 5905 list_splice_init(&prep_sgl_list, &blck_sgl_list); 5906 post_cnt = block_cnt - 1; 5907 /* prepare list for next posting block */ 5908 list_add_tail(&sglq_entry->list, &prep_sgl_list); 5909 block_cnt = 1; 5910 } else { 5911 /* prepare list for next posting block */ 5912 list_add_tail(&sglq_entry->list, &prep_sgl_list); 5913 /* enough sgls for non-embed sgl mbox command */ 5914 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) { 5915 list_splice_init(&prep_sgl_list, 5916 &blck_sgl_list); 5917 post_cnt = block_cnt; 5918 block_cnt = 0; 5919 } 5920 } 5921 num_posted++; 5922 5923 /* keep track of last sgl's xritag */ 5924 last_xritag = sglq_entry->sli4_xritag; 5925 5926 /* end of repost sgl list condition for els buffers */ 5927 if (num_posted == phba->sli4_hba.els_xri_cnt) { 5928 if (post_cnt == 0) { 5929 list_splice_init(&prep_sgl_list, 5930 &blck_sgl_list); 5931 post_cnt = block_cnt; 5932 } else if (block_cnt == 1) { 5933 status = lpfc_sli4_post_sgl(phba, 5934 sglq_entry->phys, 0, 5935 sglq_entry->sli4_xritag); 5936 if (!status) 
{ 5937 /* successful, put sgl to posted list */ 5938 list_add_tail(&sglq_entry->list, 5939 &post_sgl_list); 5940 } else { 5941 /* Failure, put sgl to free list */ 5942 lpfc_printf_log(phba, KERN_WARNING, 5943 LOG_SLI, 5944 "3159 Failed to post els " 5945 "sgl, xritag:x%x\n", 5946 sglq_entry->sli4_xritag); 5947 list_add_tail(&sglq_entry->list, 5948 &free_sgl_list); 5949 spin_lock_irq(&phba->hbalock); 5950 phba->sli4_hba.els_xri_cnt--; 5951 spin_unlock_irq(&phba->hbalock); 5952 } 5953 } 5954 } 5955 5956 /* continue until a nembed page worth of sgls */ 5957 if (post_cnt == 0) 5958 continue; 5959 5960 /* post the els buffer list sgls as a block */ 5961 status = lpfc_sli4_post_els_sgl_list(phba, &blck_sgl_list, 5962 post_cnt); 5963 5964 if (!status) { 5965 /* success, put sgl list to posted sgl list */ 5966 list_splice_init(&blck_sgl_list, &post_sgl_list); 5967 } else { 5968 /* Failure, put sgl list to free sgl list */ 5969 sglq_entry_first = list_first_entry(&blck_sgl_list, 5970 struct lpfc_sglq, 5971 list); 5972 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 5973 "3160 Failed to post els sgl-list, " 5974 "xritag:x%x-x%x\n", 5975 sglq_entry_first->sli4_xritag, 5976 (sglq_entry_first->sli4_xritag + 5977 post_cnt - 1)); 5978 list_splice_init(&blck_sgl_list, &free_sgl_list); 5979 spin_lock_irq(&phba->hbalock); 5980 phba->sli4_hba.els_xri_cnt -= post_cnt; 5981 spin_unlock_irq(&phba->hbalock); 5982 } 5983 5984 /* don't reset xirtag due to hole in xri block */ 5985 if (block_cnt == 0) 5986 last_xritag = NO_XRI; 5987 5988 /* reset els sgl post count for next round of posting */ 5989 post_cnt = 0; 5990 } 5991 5992 /* free the els sgls failed to post */ 5993 lpfc_free_sgl_list(phba, &free_sgl_list); 5994 5995 /* push els sgls posted to the availble list */ 5996 if (!list_empty(&post_sgl_list)) { 5997 spin_lock(&phba->hbalock); 5998 list_splice_init(&post_sgl_list, 5999 &phba->sli4_hba.lpfc_sgl_list); 6000 spin_unlock(&phba->hbalock); 6001 } else { 6002 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6003 "3161 Failure to post els sgl to port.\n"); 6004 return -EIO; 6005 } 6006 return 0; 6007 } 6008 6009 /** 6010 * lpfc_sli4_hba_setup - SLI4 device intialization PCI function 6011 * @phba: Pointer to HBA context object. 6012 * 6013 * This function is the main SLI4 device intialization PCI function. This 6014 * function is called by the HBA intialization code, HBA reset code and 6015 * HBA error attention handler code. Caller is not required to hold any 6016 * locks. 6017 **/ 6018 int 6019 lpfc_sli4_hba_setup(struct lpfc_hba *phba) 6020 { 6021 int rc; 6022 LPFC_MBOXQ_t *mboxq; 6023 struct lpfc_mqe *mqe; 6024 uint8_t *vpd; 6025 uint32_t vpd_size; 6026 uint32_t ftr_rsp = 0; 6027 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport); 6028 struct lpfc_vport *vport = phba->pport; 6029 struct lpfc_dmabuf *mp; 6030 6031 /* Perform a PCI function reset to start from clean */ 6032 rc = lpfc_pci_function_reset(phba); 6033 if (unlikely(rc)) 6034 return -ENODEV; 6035 6036 /* Check the HBA Host Status Register for readyness */ 6037 rc = lpfc_sli4_post_status_check(phba); 6038 if (unlikely(rc)) 6039 return -ENODEV; 6040 else { 6041 spin_lock_irq(&phba->hbalock); 6042 phba->sli.sli_flag |= LPFC_SLI_ACTIVE; 6043 spin_unlock_irq(&phba->hbalock); 6044 } 6045 6046 /* 6047 * Allocate a single mailbox container for initializing the 6048 * port. 6049 */ 6050 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6051 if (!mboxq) 6052 return -ENOMEM; 6053 6054 /* Issue READ_REV to collect vpd and FW information. 
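The VPD buffer is sized to one SLI4 page up front, and &vpd_size is passed to lpfc_sli4_read_rev() so the length actually returned can be reflected before lpfc_parse_vpd() consumes the data.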
*/ 6055 vpd_size = SLI4_PAGE_SIZE; 6056 vpd = kzalloc(vpd_size, GFP_KERNEL); 6057 if (!vpd) { 6058 rc = -ENOMEM; 6059 goto out_free_mbox; 6060 } 6061 6062 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size); 6063 if (unlikely(rc)) { 6064 kfree(vpd); 6065 goto out_free_mbox; 6066 } 6067 mqe = &mboxq->u.mqe; 6068 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev); 6069 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) 6070 phba->hba_flag |= HBA_FCOE_MODE; 6071 else 6072 phba->hba_flag &= ~HBA_FCOE_MODE; 6073 6074 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) == 6075 LPFC_DCBX_CEE_MODE) 6076 phba->hba_flag |= HBA_FIP_SUPPORT; 6077 else 6078 phba->hba_flag &= ~HBA_FIP_SUPPORT; 6079 6080 phba->hba_flag &= ~HBA_FCP_IOQ_FLUSH; 6081 6082 if (phba->sli_rev != LPFC_SLI_REV4) { 6083 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6084 "0376 READ_REV Error. SLI Level %d " 6085 "FCoE enabled %d\n", 6086 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE); 6087 rc = -EIO; 6088 kfree(vpd); 6089 goto out_free_mbox; 6090 } 6091 6092 /* 6093 * Continue initialization with default values even if driver failed 6094 * to read FCoE param config regions, only read parameters if the 6095 * board is FCoE 6096 */ 6097 if (phba->hba_flag & HBA_FCOE_MODE && 6098 lpfc_sli4_read_fcoe_params(phba)) 6099 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT, 6100 "2570 Failed to read FCoE parameters\n"); 6101 6102 /* 6103 * Retrieve sli4 device physical port name, failure of doing it 6104 * is considered as non-fatal. 6105 */ 6106 rc = lpfc_sli4_retrieve_pport_name(phba); 6107 if (!rc) 6108 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 6109 "3080 Successful retrieving SLI4 device " 6110 "physical port name: %s.\n", phba->Port); 6111 6112 /* 6113 * Evaluate the read rev and vpd data. Populate the driver 6114 * state with the results. If this routine fails, the failure 6115 * is not fatal as the driver will use generic values. 6116 */ 6117 rc = lpfc_parse_vpd(phba, vpd, vpd_size); 6118 if (unlikely(!rc)) { 6119 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6120 "0377 Error %d parsing vpd. " 6121 "Using defaults.\n", rc); 6122 rc = 0; 6123 } 6124 kfree(vpd); 6125 6126 /* Save information as VPD data */ 6127 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev; 6128 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev; 6129 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev; 6130 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high, 6131 &mqe->un.read_rev); 6132 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low, 6133 &mqe->un.read_rev); 6134 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high, 6135 &mqe->un.read_rev); 6136 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low, 6137 &mqe->un.read_rev); 6138 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev; 6139 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16); 6140 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev; 6141 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16); 6142 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev; 6143 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16); 6144 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 6145 "(%d):0380 READ_REV Status x%x " 6146 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n", 6147 mboxq->vport ? 
mboxq->vport->vpi : 0, 6148 bf_get(lpfc_mqe_status, mqe), 6149 phba->vpd.rev.opFwName, 6150 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow, 6151 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow); 6152 6153 /* 6154 * Discover the port's supported feature set and match it against the 6155 * hosts requests. 6156 */ 6157 lpfc_request_features(phba, mboxq); 6158 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6159 if (unlikely(rc)) { 6160 rc = -EIO; 6161 goto out_free_mbox; 6162 } 6163 6164 /* 6165 * The port must support FCP initiator mode as this is the 6166 * only mode running in the host. 6167 */ 6168 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) { 6169 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 6170 "0378 No support for fcpi mode.\n"); 6171 ftr_rsp++; 6172 } 6173 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs)) 6174 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED; 6175 else 6176 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED; 6177 /* 6178 * If the port cannot support the host's requested features 6179 * then turn off the global config parameters to disable the 6180 * feature in the driver. This is not a fatal error. 6181 */ 6182 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED; 6183 if (phba->cfg_enable_bg) { 6184 if (bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)) 6185 phba->sli3_options |= LPFC_SLI3_BG_ENABLED; 6186 else 6187 ftr_rsp++; 6188 } 6189 6190 if (phba->max_vpi && phba->cfg_enable_npiv && 6191 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) 6192 ftr_rsp++; 6193 6194 if (ftr_rsp) { 6195 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 6196 "0379 Feature Mismatch Data: x%08x %08x " 6197 "x%x x%x x%x\n", mqe->un.req_ftrs.word2, 6198 mqe->un.req_ftrs.word3, phba->cfg_enable_bg, 6199 phba->cfg_enable_npiv, phba->max_vpi); 6200 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) 6201 phba->cfg_enable_bg = 0; 6202 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) 6203 phba->cfg_enable_npiv = 0; 6204 } 6205 6206 /* These SLI3 features are assumed in SLI4 */ 6207 spin_lock_irq(&phba->hbalock); 6208 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED); 6209 spin_unlock_irq(&phba->hbalock); 6210 6211 /* 6212 * Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent 6213 * calls depends on these resources to complete port setup. 6214 */ 6215 rc = lpfc_sli4_alloc_resource_identifiers(phba); 6216 if (rc) { 6217 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6218 "2920 Failed to alloc Resource IDs " 6219 "rc = x%x\n", rc); 6220 goto out_free_mbox; 6221 } 6222 6223 /* Read the port's service parameters. */ 6224 rc = lpfc_read_sparam(phba, mboxq, vport->vpi); 6225 if (rc) { 6226 phba->link_state = LPFC_HBA_ERROR; 6227 rc = -ENOMEM; 6228 goto out_free_mbox; 6229 } 6230 6231 mboxq->vport = vport; 6232 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6233 mp = (struct lpfc_dmabuf *) mboxq->context1; 6234 if (rc == MBX_SUCCESS) { 6235 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm)); 6236 rc = 0; 6237 } 6238 6239 /* 6240 * This memory was allocated by the lpfc_read_sparam routine. Release 6241 * it to the mbuf pool. 
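It is freed unconditionally, on both the success and the failure path, so the READ_SPARAM error handling below does not have to clean it up again.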
6242 */ 6243 lpfc_mbuf_free(phba, mp->virt, mp->phys); 6244 kfree(mp); 6245 mboxq->context1 = NULL; 6246 if (unlikely(rc)) { 6247 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6248 "0382 READ_SPARAM command failed " 6249 "status %d, mbxStatus x%x\n", 6250 rc, bf_get(lpfc_mqe_status, mqe)); 6251 phba->link_state = LPFC_HBA_ERROR; 6252 rc = -EIO; 6253 goto out_free_mbox; 6254 } 6255 6256 lpfc_update_vport_wwn(vport); 6257 6258 /* Update the fc_host data structures with new wwn. */ 6259 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 6260 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 6261 6262 /* update host els and scsi xri-sgl sizes and mappings */ 6263 rc = lpfc_sli4_xri_sgl_update(phba); 6264 if (unlikely(rc)) { 6265 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6266 "1400 Failed to update xri-sgl size and " 6267 "mapping: %d\n", rc); 6268 goto out_free_mbox; 6269 } 6270 6271 /* register the els sgl pool to the port */ 6272 rc = lpfc_sli4_repost_els_sgl_list(phba); 6273 if (unlikely(rc)) { 6274 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6275 "0582 Error %d during els sgl post " 6276 "operation\n", rc); 6277 rc = -ENODEV; 6278 goto out_free_mbox; 6279 } 6280 6281 /* register the allocated scsi sgl pool to the port */ 6282 rc = lpfc_sli4_repost_scsi_sgl_list(phba); 6283 if (unlikely(rc)) { 6284 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6285 "0383 Error %d during scsi sgl post " 6286 "operation\n", rc); 6287 /* Some Scsi buffers were moved to the abort scsi list */ 6288 /* A pci function reset will repost them */ 6289 rc = -ENODEV; 6290 goto out_free_mbox; 6291 } 6292 6293 /* Post the rpi header region to the device. */ 6294 rc = lpfc_sli4_post_all_rpi_hdrs(phba); 6295 if (unlikely(rc)) { 6296 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6297 "0393 Error %d during rpi post operation\n", 6298 rc); 6299 rc = -ENODEV; 6300 goto out_free_mbox; 6301 } 6302 lpfc_sli4_node_prep(phba); 6303 6304 /* Create all the SLI4 queues */ 6305 rc = lpfc_sli4_queue_create(phba); 6306 if (rc) { 6307 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6308 "3089 Failed to allocate queues\n"); 6309 rc = -ENODEV; 6310 goto out_stop_timers; 6311 } 6312 /* Set up all the queues to the device */ 6313 rc = lpfc_sli4_queue_setup(phba); 6314 if (unlikely(rc)) { 6315 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6316 "0381 Error %d during queue setup.\n ", rc); 6317 goto out_destroy_queue; 6318 } 6319 6320 /* Arm the CQs and then EQs on device */ 6321 lpfc_sli4_arm_cqeq_intr(phba); 6322 6323 /* Indicate device interrupt mode */ 6324 phba->sli4_hba.intr_enable = 1; 6325 6326 /* Allow asynchronous mailbox command to go through */ 6327 spin_lock_irq(&phba->hbalock); 6328 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 6329 spin_unlock_irq(&phba->hbalock); 6330 6331 /* Post receive buffers to the device */ 6332 lpfc_sli4_rb_setup(phba); 6333 6334 /* Reset HBA FCF states after HBA reset */ 6335 phba->fcf.fcf_flag = 0; 6336 phba->fcf.current_rec.flag = 0; 6337 6338 /* Start the ELS watchdog timer */ 6339 mod_timer(&vport->els_tmofunc, 6340 jiffies + HZ * (phba->fc_ratov * 2)); 6341 6342 /* Start heart beat timer */ 6343 mod_timer(&phba->hb_tmofunc, 6344 jiffies + HZ * LPFC_HB_MBOX_INTERVAL); 6345 phba->hb_outstanding = 0; 6346 phba->last_completion_time = jiffies; 6347 6348 /* Start error attention (ERATT) polling timer */ 6349 mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL); 6350 6351 /* Enable PCIe device Advanced Error Reporting (AER) 
if configured */ 6352 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) { 6353 rc = pci_enable_pcie_error_reporting(phba->pcidev); 6354 if (!rc) { 6355 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6356 "2829 This device supports " 6357 "Advanced Error Reporting (AER)\n"); 6358 spin_lock_irq(&phba->hbalock); 6359 phba->hba_flag |= HBA_AER_ENABLED; 6360 spin_unlock_irq(&phba->hbalock); 6361 } else { 6362 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6363 "2830 This device does not support " 6364 "Advanced Error Reporting (AER)\n"); 6365 phba->cfg_aer_support = 0; 6366 } 6367 rc = 0; 6368 } 6369 6370 if (!(phba->hba_flag & HBA_FCOE_MODE)) { 6371 /* 6372 * The FC Port needs to register FCFI (index 0) 6373 */ 6374 lpfc_reg_fcfi(phba, mboxq); 6375 mboxq->vport = phba->pport; 6376 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6377 if (rc != MBX_SUCCESS) 6378 goto out_unset_queue; 6379 rc = 0; 6380 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, 6381 &mboxq->u.mqe.un.reg_fcfi); 6382 6383 /* Check if the port is configured to be disabled */ 6384 lpfc_sli_read_link_ste(phba); 6385 } 6386 6387 /* 6388 * The port is ready, set the host's link state to LINK_DOWN 6389 * in preparation for link interrupts. 6390 */ 6391 spin_lock_irq(&phba->hbalock); 6392 phba->link_state = LPFC_LINK_DOWN; 6393 spin_unlock_irq(&phba->hbalock); 6394 if (!(phba->hba_flag & HBA_FCOE_MODE) && 6395 (phba->hba_flag & LINK_DISABLED)) { 6396 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI, 6397 "3103 Adapter Link is disabled.\n"); 6398 lpfc_down_link(phba, mboxq); 6399 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6400 if (rc != MBX_SUCCESS) { 6401 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI, 6402 "3104 Adapter failed to issue " 6403 "DOWN_LINK mbox cmd, rc:x%x\n", rc); 6404 goto out_unset_queue; 6405 } 6406 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) { 6407 /* don't perform init_link on SLI4 FC port loopback test */ 6408 if (!(phba->link_flag & LS_LOOPBACK_MODE)) { 6409 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); 6410 if (rc) 6411 goto out_unset_queue; 6412 } 6413 } 6414 mempool_free(mboxq, phba->mbox_mem_pool); 6415 return rc; 6416 out_unset_queue: 6417 /* Unset all the queues set up in this routine when error out */ 6418 lpfc_sli4_queue_unset(phba); 6419 out_destroy_queue: 6420 lpfc_sli4_queue_destroy(phba); 6421 out_stop_timers: 6422 lpfc_stop_hba_timers(phba); 6423 out_free_mbox: 6424 mempool_free(mboxq, phba->mbox_mem_pool); 6425 return rc; 6426 } 6427 6428 /** 6429 * lpfc_mbox_timeout - Timeout call back function for mbox timer 6430 * @ptr: context object - pointer to hba structure. 6431 * 6432 * This is the callback function for mailbox timer. The mailbox 6433 * timer is armed when a new mailbox command is issued and the timer 6434 * is deleted when the mailbox complete. The function is called by 6435 * the kernel timer code when a mailbox does not complete within 6436 * expected time. This function wakes up the worker thread to 6437 * process the mailbox timeout and returns. All the processing is 6438 * done by the worker thread function lpfc_mbox_timeout_handler. 
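 *
 * The timer itself is armed with the per-command timeout when the command is
 * issued, using the pattern found in lpfc_sli_issue_mbox_s3() below:
 *   mod_timer(&psli->mbox_tmo,
 *             jiffies + HZ * lpfc_mbox_tmo_val(phba, pmbox));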
6439 **/ 6440 void 6441 lpfc_mbox_timeout(unsigned long ptr) 6442 { 6443 struct lpfc_hba *phba = (struct lpfc_hba *) ptr; 6444 unsigned long iflag; 6445 uint32_t tmo_posted; 6446 6447 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 6448 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO; 6449 if (!tmo_posted) 6450 phba->pport->work_port_events |= WORKER_MBOX_TMO; 6451 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 6452 6453 if (!tmo_posted) 6454 lpfc_worker_wake_up(phba); 6455 return; 6456 } 6457 6458 6459 /** 6460 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout 6461 * @phba: Pointer to HBA context object. 6462 * 6463 * This function is called from worker thread when a mailbox command times out. 6464 * The caller is not required to hold any locks. This function will reset the 6465 * HBA and recover all the pending commands. 6466 **/ 6467 void 6468 lpfc_mbox_timeout_handler(struct lpfc_hba *phba) 6469 { 6470 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active; 6471 MAILBOX_t *mb = &pmbox->u.mb; 6472 struct lpfc_sli *psli = &phba->sli; 6473 struct lpfc_sli_ring *pring; 6474 6475 /* Check the pmbox pointer first. There is a race condition 6476 * between the mbox timeout handler getting executed in the 6477 * worklist and the mailbox actually completing. When this 6478 * race condition occurs, the mbox_active will be NULL. 6479 */ 6480 spin_lock_irq(&phba->hbalock); 6481 if (pmbox == NULL) { 6482 lpfc_printf_log(phba, KERN_WARNING, 6483 LOG_MBOX | LOG_SLI, 6484 "0353 Active Mailbox cleared - mailbox timeout " 6485 "exiting\n"); 6486 spin_unlock_irq(&phba->hbalock); 6487 return; 6488 } 6489 6490 /* Mbox cmd <mbxCommand> timeout */ 6491 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6492 "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n", 6493 mb->mbxCommand, 6494 phba->pport->port_state, 6495 phba->sli.sli_flag, 6496 phba->sli.mbox_active); 6497 spin_unlock_irq(&phba->hbalock); 6498 6499 /* Setting state unknown so lpfc_sli_abort_iocb_ring 6500 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing 6501 * it to fail all outstanding SCSI IO. 6502 */ 6503 spin_lock_irq(&phba->pport->work_port_lock); 6504 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 6505 spin_unlock_irq(&phba->pport->work_port_lock); 6506 spin_lock_irq(&phba->hbalock); 6507 phba->link_state = LPFC_LINK_UNKNOWN; 6508 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 6509 spin_unlock_irq(&phba->hbalock); 6510 6511 pring = &psli->ring[psli->fcp_ring]; 6512 lpfc_sli_abort_iocb_ring(phba, pring); 6513 6514 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6515 "0345 Resetting board due to mailbox timeout\n"); 6516 6517 /* Reset the HBA device */ 6518 lpfc_reset_hba(phba); 6519 } 6520 6521 /** 6522 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware 6523 * @phba: Pointer to HBA context object. 6524 * @pmbox: Pointer to mailbox object. 6525 * @flag: Flag indicating how the mailbox need to be processed. 6526 * 6527 * This function is called by discovery code and HBA management code 6528 * to submit a mailbox command to firmware with SLI-3 interface spec. This 6529 * function gets the hbalock to protect the data structures. 6530 * The mailbox command can be submitted in polling mode, in which case 6531 * this function will wait in a polling loop for the completion of the 6532 * mailbox. 
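 * For example, a hypothetical caller that polls would do:
 *   rc = lpfc_sli_issue_mbox(phba, pmbox, MBX_POLL);
 * and, on anything other than MBX_SUCCESS, inspect the returned mailbox
 * status.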
6533 * If the mailbox is submitted in no_wait mode (not polling) the 6534 * function will submit the command and returns immediately without waiting 6535 * for the mailbox completion. The no_wait is supported only when HBA 6536 * is in SLI2/SLI3 mode - interrupts are enabled. 6537 * The SLI interface allows only one mailbox pending at a time. If the 6538 * mailbox is issued in polling mode and there is already a mailbox 6539 * pending, then the function will return an error. If the mailbox is issued 6540 * in NO_WAIT mode and there is a mailbox pending already, the function 6541 * will return MBX_BUSY after queuing the mailbox into mailbox queue. 6542 * The sli layer owns the mailbox object until the completion of mailbox 6543 * command if this function return MBX_BUSY or MBX_SUCCESS. For all other 6544 * return codes the caller owns the mailbox command after the return of 6545 * the function. 6546 **/ 6547 static int 6548 lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, 6549 uint32_t flag) 6550 { 6551 MAILBOX_t *mb; 6552 struct lpfc_sli *psli = &phba->sli; 6553 uint32_t status, evtctr; 6554 uint32_t ha_copy, hc_copy; 6555 int i; 6556 unsigned long timeout; 6557 unsigned long drvr_flag = 0; 6558 uint32_t word0, ldata; 6559 void __iomem *to_slim; 6560 int processing_queue = 0; 6561 6562 spin_lock_irqsave(&phba->hbalock, drvr_flag); 6563 if (!pmbox) { 6564 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 6565 /* processing mbox queue from intr_handler */ 6566 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { 6567 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6568 return MBX_SUCCESS; 6569 } 6570 processing_queue = 1; 6571 pmbox = lpfc_mbox_get(phba); 6572 if (!pmbox) { 6573 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6574 return MBX_SUCCESS; 6575 } 6576 } 6577 6578 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl && 6579 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) { 6580 if(!pmbox->vport) { 6581 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6582 lpfc_printf_log(phba, KERN_ERR, 6583 LOG_MBOX | LOG_VPORT, 6584 "1806 Mbox x%x failed. No vport\n", 6585 pmbox->u.mb.mbxCommand); 6586 dump_stack(); 6587 goto out_not_finished; 6588 } 6589 } 6590 6591 /* If the PCI channel is in offline state, do not post mbox. */ 6592 if (unlikely(pci_channel_offline(phba->pcidev))) { 6593 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6594 goto out_not_finished; 6595 } 6596 6597 /* If HBA has a deferred error attention, fail the iocb. */ 6598 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 6599 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6600 goto out_not_finished; 6601 } 6602 6603 psli = &phba->sli; 6604 6605 mb = &pmbox->u.mb; 6606 status = MBX_SUCCESS; 6607 6608 if (phba->link_state == LPFC_HBA_ERROR) { 6609 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6610 6611 /* Mbox command <mbxCommand> cannot issue */ 6612 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6613 "(%d):0311 Mailbox command x%x cannot " 6614 "issue Data: x%x x%x\n", 6615 pmbox->vport ? pmbox->vport->vpi : 0, 6616 pmbox->u.mb.mbxCommand, psli->sli_flag, flag); 6617 goto out_not_finished; 6618 } 6619 6620 if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) { 6621 if (lpfc_readl(phba->HCregaddr, &hc_copy) || 6622 !(hc_copy & HC_MBINT_ENA)) { 6623 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6624 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6625 "(%d):2528 Mailbox command x%x cannot " 6626 "issue Data: x%x x%x\n", 6627 pmbox->vport ? 
pmbox->vport->vpi : 0, 6628 pmbox->u.mb.mbxCommand, psli->sli_flag, flag); 6629 goto out_not_finished; 6630 } 6631 } 6632 6633 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 6634 /* Polling for a mbox command when another one is already active 6635 * is not allowed in SLI. Also, the driver must have established 6636 * SLI2 mode to queue and process multiple mbox commands. 6637 */ 6638 6639 if (flag & MBX_POLL) { 6640 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6641 6642 /* Mbox command <mbxCommand> cannot issue */ 6643 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6644 "(%d):2529 Mailbox command x%x " 6645 "cannot issue Data: x%x x%x\n", 6646 pmbox->vport ? pmbox->vport->vpi : 0, 6647 pmbox->u.mb.mbxCommand, 6648 psli->sli_flag, flag); 6649 goto out_not_finished; 6650 } 6651 6652 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) { 6653 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6654 /* Mbox command <mbxCommand> cannot issue */ 6655 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6656 "(%d):2530 Mailbox command x%x " 6657 "cannot issue Data: x%x x%x\n", 6658 pmbox->vport ? pmbox->vport->vpi : 0, 6659 pmbox->u.mb.mbxCommand, 6660 psli->sli_flag, flag); 6661 goto out_not_finished; 6662 } 6663 6664 /* Another mailbox command is still being processed, queue this 6665 * command to be processed later. 6666 */ 6667 lpfc_mbox_put(phba, pmbox); 6668 6669 /* Mbox cmd issue - BUSY */ 6670 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 6671 "(%d):0308 Mbox cmd issue - BUSY Data: " 6672 "x%x x%x x%x x%x\n", 6673 pmbox->vport ? pmbox->vport->vpi : 0xffffff, 6674 mb->mbxCommand, phba->pport->port_state, 6675 psli->sli_flag, flag); 6676 6677 psli->slistat.mbox_busy++; 6678 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6679 6680 if (pmbox->vport) { 6681 lpfc_debugfs_disc_trc(pmbox->vport, 6682 LPFC_DISC_TRC_MBOX_VPORT, 6683 "MBOX Bsy vport: cmd:x%x mb:x%x x%x", 6684 (uint32_t)mb->mbxCommand, 6685 mb->un.varWords[0], mb->un.varWords[1]); 6686 } 6687 else { 6688 lpfc_debugfs_disc_trc(phba->pport, 6689 LPFC_DISC_TRC_MBOX, 6690 "MBOX Bsy: cmd:x%x mb:x%x x%x", 6691 (uint32_t)mb->mbxCommand, 6692 mb->un.varWords[0], mb->un.varWords[1]); 6693 } 6694 6695 return MBX_BUSY; 6696 } 6697 6698 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 6699 6700 /* If we are not polling, we MUST be in SLI2 mode */ 6701 if (flag != MBX_POLL) { 6702 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) && 6703 (mb->mbxCommand != MBX_KILL_BOARD)) { 6704 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 6705 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6706 /* Mbox command <mbxCommand> cannot issue */ 6707 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6708 "(%d):2531 Mailbox command x%x " 6709 "cannot issue Data: x%x x%x\n", 6710 pmbox->vport ? pmbox->vport->vpi : 0, 6711 pmbox->u.mb.mbxCommand, 6712 psli->sli_flag, flag); 6713 goto out_not_finished; 6714 } 6715 /* timeout active mbox command */ 6716 mod_timer(&psli->mbox_tmo, (jiffies + 6717 (HZ * lpfc_mbox_tmo_val(phba, pmbox)))); 6718 } 6719 6720 /* Mailbox cmd <cmd> issue */ 6721 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 6722 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x " 6723 "x%x\n", 6724 pmbox->vport ? 
pmbox->vport->vpi : 0, 6725 mb->mbxCommand, phba->pport->port_state, 6726 psli->sli_flag, flag); 6727 6728 if (mb->mbxCommand != MBX_HEARTBEAT) { 6729 if (pmbox->vport) { 6730 lpfc_debugfs_disc_trc(pmbox->vport, 6731 LPFC_DISC_TRC_MBOX_VPORT, 6732 "MBOX Send vport: cmd:x%x mb:x%x x%x", 6733 (uint32_t)mb->mbxCommand, 6734 mb->un.varWords[0], mb->un.varWords[1]); 6735 } 6736 else { 6737 lpfc_debugfs_disc_trc(phba->pport, 6738 LPFC_DISC_TRC_MBOX, 6739 "MBOX Send: cmd:x%x mb:x%x x%x", 6740 (uint32_t)mb->mbxCommand, 6741 mb->un.varWords[0], mb->un.varWords[1]); 6742 } 6743 } 6744 6745 psli->slistat.mbox_cmd++; 6746 evtctr = psli->slistat.mbox_event; 6747 6748 /* next set own bit for the adapter and copy over command word */ 6749 mb->mbxOwner = OWN_CHIP; 6750 6751 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 6752 /* Populate mbox extension offset word. */ 6753 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) { 6754 *(((uint32_t *)mb) + pmbox->mbox_offset_word) 6755 = (uint8_t *)phba->mbox_ext 6756 - (uint8_t *)phba->mbox; 6757 } 6758 6759 /* Copy the mailbox extension data */ 6760 if (pmbox->in_ext_byte_len && pmbox->context2) { 6761 lpfc_sli_pcimem_bcopy(pmbox->context2, 6762 (uint8_t *)phba->mbox_ext, 6763 pmbox->in_ext_byte_len); 6764 } 6765 /* Copy command data to host SLIM area */ 6766 lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE); 6767 } else { 6768 /* Populate mbox extension offset word. */ 6769 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) 6770 *(((uint32_t *)mb) + pmbox->mbox_offset_word) 6771 = MAILBOX_HBA_EXT_OFFSET; 6772 6773 /* Copy the mailbox extension data */ 6774 if (pmbox->in_ext_byte_len && pmbox->context2) { 6775 lpfc_memcpy_to_slim(phba->MBslimaddr + 6776 MAILBOX_HBA_EXT_OFFSET, 6777 pmbox->context2, pmbox->in_ext_byte_len); 6778 6779 } 6780 if (mb->mbxCommand == MBX_CONFIG_PORT) { 6781 /* copy command data into host mbox for cmpl */ 6782 lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE); 6783 } 6784 6785 /* First copy mbox command data to HBA SLIM, skip past first 6786 word */ 6787 to_slim = phba->MBslimaddr + sizeof (uint32_t); 6788 lpfc_memcpy_to_slim(to_slim, &mb->un.varWords[0], 6789 MAILBOX_CMD_SIZE - sizeof (uint32_t)); 6790 6791 /* Next copy over first word, with mbxOwner set */ 6792 ldata = *((uint32_t *)mb); 6793 to_slim = phba->MBslimaddr; 6794 writel(ldata, to_slim); 6795 readl(to_slim); /* flush */ 6796 6797 if (mb->mbxCommand == MBX_CONFIG_PORT) { 6798 /* switch over to host mailbox */ 6799 psli->sli_flag |= LPFC_SLI_ACTIVE; 6800 } 6801 } 6802 6803 wmb(); 6804 6805 switch (flag) { 6806 case MBX_NOWAIT: 6807 /* Set up reference to mailbox command */ 6808 psli->mbox_active = pmbox; 6809 /* Interrupt board to do it */ 6810 writel(CA_MBATT, phba->CAregaddr); 6811 readl(phba->CAregaddr); /* flush */ 6812 /* Don't wait for it to finish, just return */ 6813 break; 6814 6815 case MBX_POLL: 6816 /* Set up null reference to mailbox command */ 6817 psli->mbox_active = NULL; 6818 /* Interrupt board to do it */ 6819 writel(CA_MBATT, phba->CAregaddr); 6820 readl(phba->CAregaddr); /* flush */ 6821 6822 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 6823 /* First read mbox status word */ 6824 word0 = *((uint32_t *)phba->mbox); 6825 word0 = le32_to_cpu(word0); 6826 } else { 6827 /* First read mbox status word */ 6828 if (lpfc_readl(phba->MBslimaddr, &word0)) { 6829 spin_unlock_irqrestore(&phba->hbalock, 6830 drvr_flag); 6831 goto out_not_finished; 6832 } 6833 } 6834 6835 /* Read the HBA Host Attention Register */ 6836 if (lpfc_readl(phba->HAregaddr, 
&ha_copy)) { 6837 spin_unlock_irqrestore(&phba->hbalock, 6838 drvr_flag); 6839 goto out_not_finished; 6840 } 6841 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) * 6842 1000) + jiffies; 6843 i = 0; 6844 /* Wait for command to complete */ 6845 while (((word0 & OWN_CHIP) == OWN_CHIP) || 6846 (!(ha_copy & HA_MBATT) && 6847 (phba->link_state > LPFC_WARM_START))) { 6848 if (time_after(jiffies, timeout)) { 6849 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 6850 spin_unlock_irqrestore(&phba->hbalock, 6851 drvr_flag); 6852 goto out_not_finished; 6853 } 6854 6855 /* Check if we took a mbox interrupt while we were 6856 polling */ 6857 if (((word0 & OWN_CHIP) != OWN_CHIP) 6858 && (evtctr != psli->slistat.mbox_event)) 6859 break; 6860 6861 if (i++ > 10) { 6862 spin_unlock_irqrestore(&phba->hbalock, 6863 drvr_flag); 6864 msleep(1); 6865 spin_lock_irqsave(&phba->hbalock, drvr_flag); 6866 } 6867 6868 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 6869 /* First copy command data */ 6870 word0 = *((uint32_t *)phba->mbox); 6871 word0 = le32_to_cpu(word0); 6872 if (mb->mbxCommand == MBX_CONFIG_PORT) { 6873 MAILBOX_t *slimmb; 6874 uint32_t slimword0; 6875 /* Check real SLIM for any errors */ 6876 slimword0 = readl(phba->MBslimaddr); 6877 slimmb = (MAILBOX_t *) & slimword0; 6878 if (((slimword0 & OWN_CHIP) != OWN_CHIP) 6879 && slimmb->mbxStatus) { 6880 psli->sli_flag &= 6881 ~LPFC_SLI_ACTIVE; 6882 word0 = slimword0; 6883 } 6884 } 6885 } else { 6886 /* First copy command data */ 6887 word0 = readl(phba->MBslimaddr); 6888 } 6889 /* Read the HBA Host Attention Register */ 6890 if (lpfc_readl(phba->HAregaddr, &ha_copy)) { 6891 spin_unlock_irqrestore(&phba->hbalock, 6892 drvr_flag); 6893 goto out_not_finished; 6894 } 6895 } 6896 6897 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 6898 /* copy results back to user */ 6899 lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE); 6900 /* Copy the mailbox extension data */ 6901 if (pmbox->out_ext_byte_len && pmbox->context2) { 6902 lpfc_sli_pcimem_bcopy(phba->mbox_ext, 6903 pmbox->context2, 6904 pmbox->out_ext_byte_len); 6905 } 6906 } else { 6907 /* First copy command data */ 6908 lpfc_memcpy_from_slim(mb, phba->MBslimaddr, 6909 MAILBOX_CMD_SIZE); 6910 /* Copy the mailbox extension data */ 6911 if (pmbox->out_ext_byte_len && pmbox->context2) { 6912 lpfc_memcpy_from_slim(pmbox->context2, 6913 phba->MBslimaddr + 6914 MAILBOX_HBA_EXT_OFFSET, 6915 pmbox->out_ext_byte_len); 6916 } 6917 } 6918 6919 writel(HA_MBATT, phba->HAregaddr); 6920 readl(phba->HAregaddr); /* flush */ 6921 6922 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 6923 status = mb->mbxStatus; 6924 } 6925 6926 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6927 return status; 6928 6929 out_not_finished: 6930 if (processing_queue) { 6931 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED; 6932 lpfc_mbox_cmpl_put(phba, pmbox); 6933 } 6934 return MBX_NOT_FINISHED; 6935 } 6936 6937 /** 6938 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command 6939 * @phba: Pointer to HBA context object. 6940 * 6941 * The function blocks the posting of SLI4 asynchronous mailbox commands from 6942 * the driver internal pending mailbox queue. It will then try to wait out the 6943 * possible outstanding mailbox command before return. 6944 * 6945 * Returns: 6946 * 0 - the outstanding mailbox command completed; otherwise, the wait for 6947 * the outstanding mailbox command timed out. 
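 *
 * It is normally paired with lpfc_sli4_async_mbox_unblock(): block, post the
 * synchronous mailbox through the bootstrap region, then unblock, as done in
 * lpfc_sli_issue_mbox_s4() later in this file.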
6948 **/
6949 static int
6950 lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
6951 {
6952 struct lpfc_sli *psli = &phba->sli;
6953 int rc = 0;
6954 unsigned long timeout = 0;
6955
6956 /* Mark the asynchronous mailbox command posting as blocked */
6957 spin_lock_irq(&phba->hbalock);
6958 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
6959 /* Determine how long we might wait for the active mailbox
6960 * command to be gracefully completed by firmware.
6961 */
6962 if (phba->sli.mbox_active)
6963 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
6964 phba->sli.mbox_active) *
6965 1000) + jiffies;
6966 spin_unlock_irq(&phba->hbalock);
6967
6968 /* Wait for the outstanding mailbox command to complete */
6969 while (phba->sli.mbox_active) {
6970 /* Check active mailbox complete status every 2ms */
6971 msleep(2);
6972 if (time_after(jiffies, timeout)) {
6973 /* Timeout, mark the outstanding cmd as not complete */
6974 rc = 1;
6975 break;
6976 }
6977 }
6978
6979 /* Could not cleanly block async mailbox commands, fail the block */
6980 if (rc) {
6981 spin_lock_irq(&phba->hbalock);
6982 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
6983 spin_unlock_irq(&phba->hbalock);
6984 }
6985 return rc;
6986 }
6987
6988 /**
6989 * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command
6990 * @phba: Pointer to HBA context object.
6991 *
6992 * The function unblocks and resumes posting of SLI4 asynchronous mailbox
6993 * commands from the driver internal pending mailbox queue. It makes sure
6994 * that there is no outstanding mailbox command before resuming posting
6995 * asynchronous mailbox commands. If, for any reason, there is an outstanding
6996 * mailbox command, it will try to wait it out before resuming asynchronous
6997 * mailbox command posting.
6998 **/
6999 static void
7000 lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
7001 {
7002 struct lpfc_sli *psli = &phba->sli;
7003
7004 spin_lock_irq(&phba->hbalock);
7005 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
7006 /* Asynchronous mailbox posting is not blocked, do nothing */
7007 spin_unlock_irq(&phba->hbalock);
7008 return;
7009 }
7010
7011 /* The outstanding synchronous mailbox command is guaranteed to be
7012 * done, whether it completed successfully or timed out; a timed-out
7013 * mailbox command is always removed, so just unblock posting of async
7014 * mailbox commands and resume.
7015 */
7016 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
7017 spin_unlock_irq(&phba->hbalock);
7018
7019 /* wake up worker thread to post asynchronous mailbox command */
7020 lpfc_worker_wake_up(phba);
7021 }
7022
7023 /**
7024 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
7025 * @phba: Pointer to HBA context object.
7026 * @mboxq: Pointer to mailbox object.
7027 *
7028 * The function posts a mailbox to the port. The mailbox is expected
7029 * to be completely filled in and ready for the port to operate on it.
7030 * This routine executes a synchronous completion operation on the
7031 * mailbox by polling for its completion.
7032 *
7033 * The caller must not be holding any locks when calling this routine.
7034 *
7035 * Returns:
7036 * MBX_SUCCESS - mailbox posted successfully
7037 * Any of the MBX error values.
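 *
 * On failure, callers such as lpfc_sli_issue_mbox_s4() below report
 * bf_get(lpfc_mqe_status, &mboxq->u.mqe) and
 * bf_get(lpfc_mcqe_status, &mboxq->mcqe) to describe the MQE/MCQE status.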
7038 **/ 7039 static int 7040 lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 7041 { 7042 int rc = MBX_SUCCESS; 7043 unsigned long iflag; 7044 uint32_t db_ready; 7045 uint32_t mcqe_status; 7046 uint32_t mbx_cmnd; 7047 unsigned long timeout; 7048 struct lpfc_sli *psli = &phba->sli; 7049 struct lpfc_mqe *mb = &mboxq->u.mqe; 7050 struct lpfc_bmbx_create *mbox_rgn; 7051 struct dma_address *dma_address; 7052 struct lpfc_register bmbx_reg; 7053 7054 /* 7055 * Only one mailbox can be active to the bootstrap mailbox region 7056 * at a time and there is no queueing provided. 7057 */ 7058 spin_lock_irqsave(&phba->hbalock, iflag); 7059 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 7060 spin_unlock_irqrestore(&phba->hbalock, iflag); 7061 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7062 "(%d):2532 Mailbox command x%x (x%x/x%x) " 7063 "cannot issue Data: x%x x%x\n", 7064 mboxq->vport ? mboxq->vport->vpi : 0, 7065 mboxq->u.mb.mbxCommand, 7066 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 7067 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 7068 psli->sli_flag, MBX_POLL); 7069 return MBXERR_ERROR; 7070 } 7071 /* The server grabs the token and owns it until release */ 7072 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 7073 phba->sli.mbox_active = mboxq; 7074 spin_unlock_irqrestore(&phba->hbalock, iflag); 7075 7076 /* 7077 * Initialize the bootstrap memory region to avoid stale data areas 7078 * in the mailbox post. Then copy the caller's mailbox contents to 7079 * the bmbx mailbox region. 7080 */ 7081 mbx_cmnd = bf_get(lpfc_mqe_command, mb); 7082 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create)); 7083 lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt, 7084 sizeof(struct lpfc_mqe)); 7085 7086 /* Post the high mailbox dma address to the port and wait for ready. */ 7087 dma_address = &phba->sli4_hba.bmbx.dma_address; 7088 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr); 7089 7090 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq) 7091 * 1000) + jiffies; 7092 do { 7093 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr); 7094 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg); 7095 if (!db_ready) 7096 msleep(2); 7097 7098 if (time_after(jiffies, timeout)) { 7099 rc = MBXERR_ERROR; 7100 goto exit; 7101 } 7102 } while (!db_ready); 7103 7104 /* Post the low mailbox dma address to the port. */ 7105 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr); 7106 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq) 7107 * 1000) + jiffies; 7108 do { 7109 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr); 7110 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg); 7111 if (!db_ready) 7112 msleep(2); 7113 7114 if (time_after(jiffies, timeout)) { 7115 rc = MBXERR_ERROR; 7116 goto exit; 7117 } 7118 } while (!db_ready); 7119 7120 /* 7121 * Read the CQ to ensure the mailbox has completed. 7122 * If so, update the mailbox status so that the upper layers 7123 * can complete the request normally. 7124 */ 7125 lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb, 7126 sizeof(struct lpfc_mqe)); 7127 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt; 7128 lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe, 7129 sizeof(struct lpfc_mcqe)); 7130 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe); 7131 /* 7132 * When the CQE status indicates a failure and the mailbox status 7133 * indicates success then copy the CQE status into the mailbox status 7134 * (and prefix it with x4000). 
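The x4000 prefix corresponds to the LPFC_MBX_ERROR_RANGE marker ORed in below, which lets upper layers distinguish a CQE-derived status from a native mailbox status.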
7135 */ 7136 if (mcqe_status != MB_CQE_STATUS_SUCCESS) { 7137 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS) 7138 bf_set(lpfc_mqe_status, mb, 7139 (LPFC_MBX_ERROR_RANGE | mcqe_status)); 7140 rc = MBXERR_ERROR; 7141 } else 7142 lpfc_sli4_swap_str(phba, mboxq); 7143 7144 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 7145 "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x " 7146 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x" 7147 " x%x x%x CQ: x%x x%x x%x x%x\n", 7148 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd, 7149 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 7150 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 7151 bf_get(lpfc_mqe_status, mb), 7152 mb->un.mb_words[0], mb->un.mb_words[1], 7153 mb->un.mb_words[2], mb->un.mb_words[3], 7154 mb->un.mb_words[4], mb->un.mb_words[5], 7155 mb->un.mb_words[6], mb->un.mb_words[7], 7156 mb->un.mb_words[8], mb->un.mb_words[9], 7157 mb->un.mb_words[10], mb->un.mb_words[11], 7158 mb->un.mb_words[12], mboxq->mcqe.word0, 7159 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, 7160 mboxq->mcqe.trailer); 7161 exit: 7162 /* We are holding the token, no needed for lock when release */ 7163 spin_lock_irqsave(&phba->hbalock, iflag); 7164 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 7165 phba->sli.mbox_active = NULL; 7166 spin_unlock_irqrestore(&phba->hbalock, iflag); 7167 return rc; 7168 } 7169 7170 /** 7171 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware 7172 * @phba: Pointer to HBA context object. 7173 * @pmbox: Pointer to mailbox object. 7174 * @flag: Flag indicating how the mailbox need to be processed. 7175 * 7176 * This function is called by discovery code and HBA management code to submit 7177 * a mailbox command to firmware with SLI-4 interface spec. 7178 * 7179 * Return codes the caller owns the mailbox command after the return of the 7180 * function. 7181 **/ 7182 static int 7183 lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, 7184 uint32_t flag) 7185 { 7186 struct lpfc_sli *psli = &phba->sli; 7187 unsigned long iflags; 7188 int rc; 7189 7190 /* dump from issue mailbox command if setup */ 7191 lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb); 7192 7193 rc = lpfc_mbox_dev_check(phba); 7194 if (unlikely(rc)) { 7195 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7196 "(%d):2544 Mailbox command x%x (x%x/x%x) " 7197 "cannot issue Data: x%x x%x\n", 7198 mboxq->vport ? mboxq->vport->vpi : 0, 7199 mboxq->u.mb.mbxCommand, 7200 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 7201 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 7202 psli->sli_flag, flag); 7203 goto out_not_finished; 7204 } 7205 7206 /* Detect polling mode and jump to a handler */ 7207 if (!phba->sli4_hba.intr_enable) { 7208 if (flag == MBX_POLL) 7209 rc = lpfc_sli4_post_sync_mbox(phba, mboxq); 7210 else 7211 rc = -EIO; 7212 if (rc != MBX_SUCCESS) 7213 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 7214 "(%d):2541 Mailbox command x%x " 7215 "(x%x/x%x) failure: " 7216 "mqe_sta: x%x mcqe_sta: x%x/x%x " 7217 "Data: x%x x%x\n,", 7218 mboxq->vport ? 
mboxq->vport->vpi : 0, 7219 mboxq->u.mb.mbxCommand, 7220 lpfc_sli_config_mbox_subsys_get(phba, 7221 mboxq), 7222 lpfc_sli_config_mbox_opcode_get(phba, 7223 mboxq), 7224 bf_get(lpfc_mqe_status, &mboxq->u.mqe), 7225 bf_get(lpfc_mcqe_status, &mboxq->mcqe), 7226 bf_get(lpfc_mcqe_ext_status, 7227 &mboxq->mcqe), 7228 psli->sli_flag, flag); 7229 return rc; 7230 } else if (flag == MBX_POLL) { 7231 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 7232 "(%d):2542 Try to issue mailbox command " 7233 "x%x (x%x/x%x) synchronously ahead of async" 7234 "mailbox command queue: x%x x%x\n", 7235 mboxq->vport ? mboxq->vport->vpi : 0, 7236 mboxq->u.mb.mbxCommand, 7237 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 7238 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 7239 psli->sli_flag, flag); 7240 /* Try to block the asynchronous mailbox posting */ 7241 rc = lpfc_sli4_async_mbox_block(phba); 7242 if (!rc) { 7243 /* Successfully blocked, now issue sync mbox cmd */ 7244 rc = lpfc_sli4_post_sync_mbox(phba, mboxq); 7245 if (rc != MBX_SUCCESS) 7246 lpfc_printf_log(phba, KERN_WARNING, 7247 LOG_MBOX | LOG_SLI, 7248 "(%d):2597 Sync Mailbox command " 7249 "x%x (x%x/x%x) failure: " 7250 "mqe_sta: x%x mcqe_sta: x%x/x%x " 7251 "Data: x%x x%x\n,", 7252 mboxq->vport ? mboxq->vport->vpi : 0, 7253 mboxq->u.mb.mbxCommand, 7254 lpfc_sli_config_mbox_subsys_get(phba, 7255 mboxq), 7256 lpfc_sli_config_mbox_opcode_get(phba, 7257 mboxq), 7258 bf_get(lpfc_mqe_status, &mboxq->u.mqe), 7259 bf_get(lpfc_mcqe_status, &mboxq->mcqe), 7260 bf_get(lpfc_mcqe_ext_status, 7261 &mboxq->mcqe), 7262 psli->sli_flag, flag); 7263 /* Unblock the async mailbox posting afterward */ 7264 lpfc_sli4_async_mbox_unblock(phba); 7265 } 7266 return rc; 7267 } 7268 7269 /* Now, interrupt mode asynchrous mailbox command */ 7270 rc = lpfc_mbox_cmd_check(phba, mboxq); 7271 if (rc) { 7272 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7273 "(%d):2543 Mailbox command x%x (x%x/x%x) " 7274 "cannot issue Data: x%x x%x\n", 7275 mboxq->vport ? mboxq->vport->vpi : 0, 7276 mboxq->u.mb.mbxCommand, 7277 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 7278 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 7279 psli->sli_flag, flag); 7280 goto out_not_finished; 7281 } 7282 7283 /* Put the mailbox command to the driver internal FIFO */ 7284 psli->slistat.mbox_busy++; 7285 spin_lock_irqsave(&phba->hbalock, iflags); 7286 lpfc_mbox_put(phba, mboxq); 7287 spin_unlock_irqrestore(&phba->hbalock, iflags); 7288 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 7289 "(%d):0354 Mbox cmd issue - Enqueue Data: " 7290 "x%x (x%x/x%x) x%x x%x x%x\n", 7291 mboxq->vport ? mboxq->vport->vpi : 0xffffff, 7292 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 7293 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 7294 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 7295 phba->pport->port_state, 7296 psli->sli_flag, MBX_NOWAIT); 7297 /* Wake up worker thread to transport mailbox command from head */ 7298 lpfc_worker_wake_up(phba); 7299 7300 return MBX_BUSY; 7301 7302 out_not_finished: 7303 return MBX_NOT_FINISHED; 7304 } 7305 7306 /** 7307 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device 7308 * @phba: Pointer to HBA context object. 7309 * 7310 * This function is called by worker thread to send a mailbox command to 7311 * SLI4 HBA firmware. 
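 *
 * Returns MBX_SUCCESS when there is no pending command to post or when the
 * next pending command has been posted to the port's mailbox queue; returns
 * MBX_NOT_FINISHED when interrupts are not enabled, async posting is
 * blocked, another mailbox command is already active, the device check
 * fails, or the MQ post itself fails.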
7312 * 7313 **/ 7314 int 7315 lpfc_sli4_post_async_mbox(struct lpfc_hba *phba) 7316 { 7317 struct lpfc_sli *psli = &phba->sli; 7318 LPFC_MBOXQ_t *mboxq; 7319 int rc = MBX_SUCCESS; 7320 unsigned long iflags; 7321 struct lpfc_mqe *mqe; 7322 uint32_t mbx_cmnd; 7323 7324 /* Check interrupt mode before post async mailbox command */ 7325 if (unlikely(!phba->sli4_hba.intr_enable)) 7326 return MBX_NOT_FINISHED; 7327 7328 /* Check for mailbox command service token */ 7329 spin_lock_irqsave(&phba->hbalock, iflags); 7330 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { 7331 spin_unlock_irqrestore(&phba->hbalock, iflags); 7332 return MBX_NOT_FINISHED; 7333 } 7334 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 7335 spin_unlock_irqrestore(&phba->hbalock, iflags); 7336 return MBX_NOT_FINISHED; 7337 } 7338 if (unlikely(phba->sli.mbox_active)) { 7339 spin_unlock_irqrestore(&phba->hbalock, iflags); 7340 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7341 "0384 There is pending active mailbox cmd\n"); 7342 return MBX_NOT_FINISHED; 7343 } 7344 /* Take the mailbox command service token */ 7345 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 7346 7347 /* Get the next mailbox command from head of queue */ 7348 mboxq = lpfc_mbox_get(phba); 7349 7350 /* If no more mailbox command waiting for post, we're done */ 7351 if (!mboxq) { 7352 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 7353 spin_unlock_irqrestore(&phba->hbalock, iflags); 7354 return MBX_SUCCESS; 7355 } 7356 phba->sli.mbox_active = mboxq; 7357 spin_unlock_irqrestore(&phba->hbalock, iflags); 7358 7359 /* Check device readiness for posting mailbox command */ 7360 rc = lpfc_mbox_dev_check(phba); 7361 if (unlikely(rc)) 7362 /* Driver clean routine will clean up pending mailbox */ 7363 goto out_not_finished; 7364 7365 /* Prepare the mbox command to be posted */ 7366 mqe = &mboxq->u.mqe; 7367 mbx_cmnd = bf_get(lpfc_mqe_command, mqe); 7368 7369 /* Start timer for the mbox_tmo and log some mailbox post messages */ 7370 mod_timer(&psli->mbox_tmo, (jiffies + 7371 (HZ * lpfc_mbox_tmo_val(phba, mboxq)))); 7372 7373 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 7374 "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: " 7375 "x%x x%x\n", 7376 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd, 7377 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 7378 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 7379 phba->pport->port_state, psli->sli_flag); 7380 7381 if (mbx_cmnd != MBX_HEARTBEAT) { 7382 if (mboxq->vport) { 7383 lpfc_debugfs_disc_trc(mboxq->vport, 7384 LPFC_DISC_TRC_MBOX_VPORT, 7385 "MBOX Send vport: cmd:x%x mb:x%x x%x", 7386 mbx_cmnd, mqe->un.mb_words[0], 7387 mqe->un.mb_words[1]); 7388 } else { 7389 lpfc_debugfs_disc_trc(phba->pport, 7390 LPFC_DISC_TRC_MBOX, 7391 "MBOX Send: cmd:x%x mb:x%x x%x", 7392 mbx_cmnd, mqe->un.mb_words[0], 7393 mqe->un.mb_words[1]); 7394 } 7395 } 7396 psli->slistat.mbox_cmd++; 7397 7398 /* Post the mailbox command to the port */ 7399 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe); 7400 if (rc != MBX_SUCCESS) { 7401 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7402 "(%d):2533 Mailbox command x%x (x%x/x%x) " 7403 "cannot issue Data: x%x x%x\n", 7404 mboxq->vport ? 
mboxq->vport->vpi : 0, 7405 mboxq->u.mb.mbxCommand, 7406 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 7407 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 7408 psli->sli_flag, MBX_NOWAIT); 7409 goto out_not_finished; 7410 } 7411 7412 return rc; 7413 7414 out_not_finished: 7415 spin_lock_irqsave(&phba->hbalock, iflags); 7416 if (phba->sli.mbox_active) { 7417 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; 7418 __lpfc_mbox_cmpl_put(phba, mboxq); 7419 /* Release the token */ 7420 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 7421 phba->sli.mbox_active = NULL; 7422 } 7423 spin_unlock_irqrestore(&phba->hbalock, iflags); 7424 7425 return MBX_NOT_FINISHED; 7426 } 7427 7428 /** 7429 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command 7430 * @phba: Pointer to HBA context object. 7431 * @pmbox: Pointer to mailbox object. 7432 * @flag: Flag indicating how the mailbox need to be processed. 7433 * 7434 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from 7435 * the API jump table function pointer from the lpfc_hba struct. 7436 * 7437 * Return codes the caller owns the mailbox command after the return of the 7438 * function. 7439 **/ 7440 int 7441 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) 7442 { 7443 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag); 7444 } 7445 7446 /** 7447 * lpfc_mbox_api_table_setup - Set up mbox api function jump table 7448 * @phba: The hba struct for which this call is being executed. 7449 * @dev_grp: The HBA PCI-Device group number. 7450 * 7451 * This routine sets up the mbox interface API function jump table in @phba 7452 * struct. 7453 * Returns: 0 - success, -ENODEV - failure. 7454 **/ 7455 int 7456 lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 7457 { 7458 7459 switch (dev_grp) { 7460 case LPFC_PCI_DEV_LP: 7461 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3; 7462 phba->lpfc_sli_handle_slow_ring_event = 7463 lpfc_sli_handle_slow_ring_event_s3; 7464 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3; 7465 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3; 7466 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3; 7467 break; 7468 case LPFC_PCI_DEV_OC: 7469 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4; 7470 phba->lpfc_sli_handle_slow_ring_event = 7471 lpfc_sli_handle_slow_ring_event_s4; 7472 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4; 7473 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4; 7474 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4; 7475 break; 7476 default: 7477 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7478 "1420 Invalid HBA PCI-device group: 0x%x\n", 7479 dev_grp); 7480 return -ENODEV; 7481 break; 7482 } 7483 return 0; 7484 } 7485 7486 /** 7487 * __lpfc_sli_ringtx_put - Add an iocb to the txq 7488 * @phba: Pointer to HBA context object. 7489 * @pring: Pointer to driver SLI ring object. 7490 * @piocb: Pointer to address of newly added command iocb. 7491 * 7492 * This function is called with hbalock held to add a command 7493 * iocb to the txq when SLI layer cannot submit the command iocb 7494 * to the ring. 7495 **/ 7496 void 7497 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 7498 struct lpfc_iocbq *piocb) 7499 { 7500 /* Insert the caller's iocb in the txq tail for later processing. */ 7501 list_add_tail(&piocb->list, &pring->txq); 7502 pring->txq_cnt++; 7503 } 7504 7505 /** 7506 * lpfc_sli_next_iocb - Get the next iocb in the txq 7507 * @phba: Pointer to HBA context object. 7508 * @pring: Pointer to driver SLI ring object. 
7509 * @piocb: Pointer to address of newly added command iocb. 7510 * 7511 * This function is called with hbalock held before a new 7512 * iocb is submitted to the firmware. This function checks 7513 * txq to flush the iocbs in txq to Firmware before 7514 * submitting new iocbs to the Firmware. 7515 * If there are iocbs in the txq which need to be submitted 7516 * to firmware, lpfc_sli_next_iocb returns the first element 7517 * of the txq after dequeuing it from txq. 7518 * If there is no iocb in the txq then the function will return 7519 * *piocb and *piocb is set to NULL. Caller needs to check 7520 * *piocb to find if there are more commands in the txq. 7521 **/ 7522 static struct lpfc_iocbq * 7523 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 7524 struct lpfc_iocbq **piocb) 7525 { 7526 struct lpfc_iocbq * nextiocb; 7527 7528 nextiocb = lpfc_sli_ringtx_get(phba, pring); 7529 if (!nextiocb) { 7530 nextiocb = *piocb; 7531 *piocb = NULL; 7532 } 7533 7534 return nextiocb; 7535 } 7536 7537 /** 7538 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb 7539 * @phba: Pointer to HBA context object. 7540 * @ring_number: SLI ring number to issue iocb on. 7541 * @piocb: Pointer to command iocb. 7542 * @flag: Flag indicating if this command can be put into txq. 7543 * 7544 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue 7545 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is 7546 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT 7547 * flag is turned on, the function returns IOCB_ERROR. When the link is down, 7548 * this function allows only iocbs for posting buffers. This function finds 7549 * next available slot in the command ring and posts the command to the 7550 * available slot and writes the port attention register to request HBA start 7551 * processing new iocb. If there is no slot available in the ring and 7552 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise 7553 * the function returns IOCB_BUSY. 7554 * 7555 * This function is called with hbalock held. The function will return success 7556 * after it successfully submit the iocb to firmware or after adding to the 7557 * txq. 7558 **/ 7559 static int 7560 __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number, 7561 struct lpfc_iocbq *piocb, uint32_t flag) 7562 { 7563 struct lpfc_iocbq *nextiocb; 7564 IOCB_t *iocb; 7565 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number]; 7566 7567 if (piocb->iocb_cmpl && (!piocb->vport) && 7568 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && 7569 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) { 7570 lpfc_printf_log(phba, KERN_ERR, 7571 LOG_SLI | LOG_VPORT, 7572 "1807 IOCB x%x failed. No vport\n", 7573 piocb->iocb.ulpCommand); 7574 dump_stack(); 7575 return IOCB_ERROR; 7576 } 7577 7578 7579 /* If the PCI channel is in offline state, do not post iocbs. */ 7580 if (unlikely(pci_channel_offline(phba->pcidev))) 7581 return IOCB_ERROR; 7582 7583 /* If HBA has a deferred error attention, fail the iocb. */ 7584 if (unlikely(phba->hba_flag & DEFER_ERATT)) 7585 return IOCB_ERROR; 7586 7587 /* 7588 * We should never get an IOCB if we are in a < LINK_DOWN state 7589 */ 7590 if (unlikely(phba->link_state < LPFC_LINK_DOWN)) 7591 return IOCB_ERROR; 7592 7593 /* 7594 * Check to see if we are blocking IOCB processing because of a 7595 * outstanding event. 
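 * (i.e. the LPFC_STOP_IOCB_EVENT flag is set on this ring).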
7596 */ 7597 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT)) 7598 goto iocb_busy; 7599 7600 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) { 7601 /* 7602 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF 7603 * can be issued if the link is not up. 7604 */ 7605 switch (piocb->iocb.ulpCommand) { 7606 case CMD_GEN_REQUEST64_CR: 7607 case CMD_GEN_REQUEST64_CX: 7608 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) || 7609 (piocb->iocb.un.genreq64.w5.hcsw.Rctl != 7610 FC_RCTL_DD_UNSOL_CMD) || 7611 (piocb->iocb.un.genreq64.w5.hcsw.Type != 7612 MENLO_TRANSPORT_TYPE)) 7613 7614 goto iocb_busy; 7615 break; 7616 case CMD_QUE_RING_BUF_CN: 7617 case CMD_QUE_RING_BUF64_CN: 7618 /* 7619 * For IOCBs, like QUE_RING_BUF, that have no rsp ring 7620 * completion, iocb_cmpl MUST be 0. 7621 */ 7622 if (piocb->iocb_cmpl) 7623 piocb->iocb_cmpl = NULL; 7624 /*FALLTHROUGH*/ 7625 case CMD_CREATE_XRI_CR: 7626 case CMD_CLOSE_XRI_CN: 7627 case CMD_CLOSE_XRI_CX: 7628 break; 7629 default: 7630 goto iocb_busy; 7631 } 7632 7633 /* 7634 * For FCP commands, we must be in a state where we can process link 7635 * attention events. 7636 */ 7637 } else if (unlikely(pring->ringno == phba->sli.fcp_ring && 7638 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) { 7639 goto iocb_busy; 7640 } 7641 7642 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && 7643 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb))) 7644 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb); 7645 7646 if (iocb) 7647 lpfc_sli_update_ring(phba, pring); 7648 else 7649 lpfc_sli_update_full_ring(phba, pring); 7650 7651 if (!piocb) 7652 return IOCB_SUCCESS; 7653 7654 goto out_busy; 7655 7656 iocb_busy: 7657 pring->stats.iocb_cmd_delay++; 7658 7659 out_busy: 7660 7661 if (!(flag & SLI_IOCB_RET_IOCB)) { 7662 __lpfc_sli_ringtx_put(phba, pring, piocb); 7663 return IOCB_SUCCESS; 7664 } 7665 7666 return IOCB_BUSY; 7667 } 7668 7669 /** 7670 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl. 7671 * @phba: Pointer to HBA context object. 7672 * @piocb: Pointer to command iocb. 7673 * @sglq: Pointer to the scatter gather queue object. 7674 * 7675 * This routine converts the bpl or bde that is in the IOCB 7676 * to a sgl list for the sli4 hardware. The physical address 7677 * of the bpl/bde is converted back to a virtual address. 7678 * If the IOCB contains a BPL then the list of BDE's is 7679 * converted to sli4_sge's. If the IOCB contains a single 7680 * BDE then it is converted to a single sli_sge. 7681 * The IOCB is still in cpu endianess so the contents of 7682 * the bpl can be used without byte swapping. 7683 * 7684 * Returns valid XRI = Success, NO_XRI = Failure. 
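 *
 * A CMD_XMIT_BLS_RSP64_CX iocb needs no conversion and simply returns the
 * sglq's XRI. For CMD_GEN_REQUEST64_CR the sge offsets are accumulated
 * separately for the request and the reply portions of the list; the
 * offset is reset to zero at the first inbound (reply) entry.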
7685 **/ 7686 static uint16_t 7687 lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, 7688 struct lpfc_sglq *sglq) 7689 { 7690 uint16_t xritag = NO_XRI; 7691 struct ulp_bde64 *bpl = NULL; 7692 struct ulp_bde64 bde; 7693 struct sli4_sge *sgl = NULL; 7694 struct lpfc_dmabuf *dmabuf; 7695 IOCB_t *icmd; 7696 int numBdes = 0; 7697 int i = 0; 7698 uint32_t offset = 0; /* accumulated offset in the sg request list */ 7699 int inbound = 0; /* number of sg reply entries inbound from firmware */ 7700 7701 if (!piocbq || !sglq) 7702 return xritag; 7703 7704 sgl = (struct sli4_sge *)sglq->sgl; 7705 icmd = &piocbq->iocb; 7706 if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX) 7707 return sglq->sli4_xritag; 7708 if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { 7709 numBdes = icmd->un.genreq64.bdl.bdeSize / 7710 sizeof(struct ulp_bde64); 7711 /* The addrHigh and addrLow fields within the IOCB 7712 * have not been byteswapped yet so there is no 7713 * need to swap them back. 7714 */ 7715 if (piocbq->context3) 7716 dmabuf = (struct lpfc_dmabuf *)piocbq->context3; 7717 else 7718 return xritag; 7719 7720 bpl = (struct ulp_bde64 *)dmabuf->virt; 7721 if (!bpl) 7722 return xritag; 7723 7724 for (i = 0; i < numBdes; i++) { 7725 /* Should already be byte swapped. */ 7726 sgl->addr_hi = bpl->addrHigh; 7727 sgl->addr_lo = bpl->addrLow; 7728 7729 sgl->word2 = le32_to_cpu(sgl->word2); 7730 if ((i+1) == numBdes) 7731 bf_set(lpfc_sli4_sge_last, sgl, 1); 7732 else 7733 bf_set(lpfc_sli4_sge_last, sgl, 0); 7734 /* swap the size field back to the cpu so we 7735 * can assign it to the sgl. 7736 */ 7737 bde.tus.w = le32_to_cpu(bpl->tus.w); 7738 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize); 7739 /* The offsets in the sgl need to be accumulated 7740 * separately for the request and reply lists. 7741 * The request is always first, the reply follows. 7742 */ 7743 if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) { 7744 /* add up the reply sg entries */ 7745 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I) 7746 inbound++; 7747 /* first inbound? reset the offset */ 7748 if (inbound == 1) 7749 offset = 0; 7750 bf_set(lpfc_sli4_sge_offset, sgl, offset); 7751 bf_set(lpfc_sli4_sge_type, sgl, 7752 LPFC_SGE_TYPE_DATA); 7753 offset += bde.tus.f.bdeSize; 7754 } 7755 sgl->word2 = cpu_to_le32(sgl->word2); 7756 bpl++; 7757 sgl++; 7758 } 7759 } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) { 7760 /* The addrHigh and addrLow fields of the BDE have not 7761 * been byteswapped yet so they need to be swapped 7762 * before putting them in the sgl. 7763 */ 7764 sgl->addr_hi = 7765 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh); 7766 sgl->addr_lo = 7767 cpu_to_le32(icmd->un.genreq64.bdl.addrLow); 7768 sgl->word2 = le32_to_cpu(sgl->word2); 7769 bf_set(lpfc_sli4_sge_last, sgl, 1); 7770 sgl->word2 = cpu_to_le32(sgl->word2); 7771 sgl->sge_len = 7772 cpu_to_le32(icmd->un.genreq64.bdl.bdeSize); 7773 } 7774 return sglq->sli4_xritag; 7775 } 7776 7777 /** 7778 * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution 7779 * @phba: Pointer to HBA context object. 7780 * 7781 * This routine performs a roundrobin SCSI command to SLI4 FCP WQ index 7782 * distribution. This is called by __lpfc_sli_issue_iocb_s4() with the hbalock 7783 * held. 7784 * 7785 * Return: index into SLI4 fast-path FCP queue index. 
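 *
 * This is a simple round-robin across the configured fast-path FCP WQs,
 * equivalent to:
 *
 *   phba->fcp_qidx = (phba->fcp_qidx + 1) % phba->cfg_fcp_wq_count;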
7786 **/ 7787 static uint32_t 7788 lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba) 7789 { 7790 ++phba->fcp_qidx; 7791 if (phba->fcp_qidx >= phba->cfg_fcp_wq_count) 7792 phba->fcp_qidx = 0; 7793 7794 return phba->fcp_qidx; 7795 } 7796 7797 /** 7798 * lpfc_sli_iocb2wqe - Convert the IOCB to a work queue entry. 7799 * @phba: Pointer to HBA context object. 7800 * @piocb: Pointer to command iocb. 7801 * @wqe: Pointer to the work queue entry. 7802 * 7803 * This routine converts the iocb command to its Work Queue Entry 7804 * equivalent. The wqe pointer should not have any fields set when 7805 * this routine is called because it will memcpy over them. 7806 * This routine does not set the CQ_ID or the WQEC bits in the 7807 * wqe. 7808 * 7809 * Returns: 0 = Success, IOCB_ERROR = Failure. 7810 **/ 7811 static int 7812 lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, 7813 union lpfc_wqe *wqe) 7814 { 7815 uint32_t xmit_len = 0, total_len = 0; 7816 uint8_t ct = 0; 7817 uint32_t fip; 7818 uint32_t abort_tag; 7819 uint8_t command_type = ELS_COMMAND_NON_FIP; 7820 uint8_t cmnd; 7821 uint16_t xritag; 7822 uint16_t abrt_iotag; 7823 struct lpfc_iocbq *abrtiocbq; 7824 struct ulp_bde64 *bpl = NULL; 7825 uint32_t els_id = LPFC_ELS_ID_DEFAULT; 7826 int numBdes, i; 7827 struct ulp_bde64 bde; 7828 struct lpfc_nodelist *ndlp; 7829 uint32_t *pcmd; 7830 uint32_t if_type; 7831 7832 fip = phba->hba_flag & HBA_FIP_SUPPORT; 7833 /* The fcp commands will set command type */ 7834 if (iocbq->iocb_flag & LPFC_IO_FCP) 7835 command_type = FCP_COMMAND; 7836 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)) 7837 command_type = ELS_COMMAND_FIP; 7838 else 7839 command_type = ELS_COMMAND_NON_FIP; 7840 7841 /* Some of the fields are in the right position already */ 7842 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe)); 7843 abort_tag = (uint32_t) iocbq->iotag; 7844 xritag = iocbq->sli4_xritag; 7845 wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */ 7846 /* words0-2 bpl convert bde */ 7847 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { 7848 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / 7849 sizeof(struct ulp_bde64); 7850 bpl = (struct ulp_bde64 *) 7851 ((struct lpfc_dmabuf *)iocbq->context3)->virt; 7852 if (!bpl) 7853 return IOCB_ERROR; 7854 7855 /* Should already be byte swapped. */ 7856 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh); 7857 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow); 7858 /* swap the size field back to the cpu so we 7859 * can assign it to the sgl. 
7860 */ 7861 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w); 7862 xmit_len = wqe->generic.bde.tus.f.bdeSize; 7863 total_len = 0; 7864 for (i = 0; i < numBdes; i++) { 7865 bde.tus.w = le32_to_cpu(bpl[i].tus.w); 7866 total_len += bde.tus.f.bdeSize; 7867 } 7868 } else 7869 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize; 7870 7871 iocbq->iocb.ulpIoTag = iocbq->iotag; 7872 cmnd = iocbq->iocb.ulpCommand; 7873 7874 switch (iocbq->iocb.ulpCommand) { 7875 case CMD_ELS_REQUEST64_CR: 7876 if (iocbq->iocb_flag & LPFC_IO_LIBDFC) 7877 ndlp = iocbq->context_un.ndlp; 7878 else 7879 ndlp = (struct lpfc_nodelist *)iocbq->context1; 7880 if (!iocbq->iocb.ulpLe) { 7881 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 7882 "2007 Only Limited Edition cmd Format" 7883 " supported 0x%x\n", 7884 iocbq->iocb.ulpCommand); 7885 return IOCB_ERROR; 7886 } 7887 7888 wqe->els_req.payload_len = xmit_len; 7889 /* Els_reguest64 has a TMO */ 7890 bf_set(wqe_tmo, &wqe->els_req.wqe_com, 7891 iocbq->iocb.ulpTimeout); 7892 /* Need a VF for word 4 set the vf bit*/ 7893 bf_set(els_req64_vf, &wqe->els_req, 0); 7894 /* And a VFID for word 12 */ 7895 bf_set(els_req64_vfid, &wqe->els_req, 0); 7896 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); 7897 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 7898 iocbq->iocb.ulpContext); 7899 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct); 7900 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0); 7901 /* CCP CCPE PV PRI in word10 were set in the memcpy */ 7902 if (command_type == ELS_COMMAND_FIP) 7903 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK) 7904 >> LPFC_FIP_ELS_ID_SHIFT); 7905 pcmd = (uint32_t *) (((struct lpfc_dmabuf *) 7906 iocbq->context2)->virt); 7907 if_type = bf_get(lpfc_sli_intf_if_type, 7908 &phba->sli4_hba.sli_intf); 7909 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) { 7910 if (pcmd && (*pcmd == ELS_CMD_FLOGI || 7911 *pcmd == ELS_CMD_SCR || 7912 *pcmd == ELS_CMD_FDISC || 7913 *pcmd == ELS_CMD_LOGO || 7914 *pcmd == ELS_CMD_PLOGI)) { 7915 bf_set(els_req64_sp, &wqe->els_req, 1); 7916 bf_set(els_req64_sid, &wqe->els_req, 7917 iocbq->vport->fc_myDID); 7918 if ((*pcmd == ELS_CMD_FLOGI) && 7919 !(phba->fc_topology == 7920 LPFC_TOPOLOGY_LOOP)) 7921 bf_set(els_req64_sid, &wqe->els_req, 0); 7922 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1); 7923 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 7924 phba->vpi_ids[iocbq->vport->vpi]); 7925 } else if (pcmd && iocbq->context1) { 7926 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0); 7927 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 7928 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 7929 } 7930 } 7931 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com, 7932 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 7933 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id); 7934 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1); 7935 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ); 7936 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1); 7937 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE); 7938 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0); 7939 break; 7940 case CMD_XMIT_SEQUENCE64_CX: 7941 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, 7942 iocbq->iocb.un.ulpWord[3]); 7943 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, 7944 iocbq->iocb.unsli3.rcvsli3.ox_id); 7945 /* The entire sequence is transmitted for this IOCB */ 7946 xmit_len = total_len; 7947 cmnd = CMD_XMIT_SEQUENCE64_CR; 7948 if (phba->link_flag & LS_LOOPBACK_MODE) 7949 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1); 7950 case CMD_XMIT_SEQUENCE64_CR: 7951 /* word3 iocb=io_tag32 wqe=reserved */ 7952 wqe->xmit_sequence.rsvd3 = 0; 7953 /* word4 
relative_offset memcpy */ 7954 /* word5 r_ctl/df_ctl memcpy */ 7955 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0); 7956 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1); 7957 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, 7958 LPFC_WQE_IOD_WRITE); 7959 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com, 7960 LPFC_WQE_LENLOC_WORD12); 7961 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0); 7962 wqe->xmit_sequence.xmit_len = xmit_len; 7963 command_type = OTHER_COMMAND; 7964 break; 7965 case CMD_XMIT_BCAST64_CN: 7966 /* word3 iocb=iotag32 wqe=seq_payload_len */ 7967 wqe->xmit_bcast64.seq_payload_len = xmit_len; 7968 /* word4 iocb=rsvd wqe=rsvd */ 7969 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */ 7970 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */ 7971 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com, 7972 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 7973 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1); 7974 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE); 7975 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com, 7976 LPFC_WQE_LENLOC_WORD3); 7977 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0); 7978 break; 7979 case CMD_FCP_IWRITE64_CR: 7980 command_type = FCP_COMMAND_DATA_OUT; 7981 /* word3 iocb=iotag wqe=payload_offset_len */ 7982 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ 7983 wqe->fcp_iwrite.payload_offset_len = 7984 xmit_len + sizeof(struct fcp_rsp); 7985 /* word4 iocb=parameter wqe=total_xfer_length memcpy */ 7986 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */ 7987 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com, 7988 iocbq->iocb.ulpFCP2Rcvy); 7989 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS); 7990 /* Always open the exchange */ 7991 bf_set(wqe_xc, &wqe->fcp_iwrite.wqe_com, 0); 7992 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE); 7993 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, 7994 LPFC_WQE_LENLOC_WORD4); 7995 bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0); 7996 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU); 7997 if (iocbq->iocb_flag & LPFC_IO_DIF) { 7998 iocbq->iocb_flag &= ~LPFC_IO_DIF; 7999 bf_set(wqe_dif, &wqe->generic.wqe_com, 1); 8000 } 8001 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1); 8002 break; 8003 case CMD_FCP_IREAD64_CR: 8004 /* word3 iocb=iotag wqe=payload_offset_len */ 8005 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ 8006 wqe->fcp_iread.payload_offset_len = 8007 xmit_len + sizeof(struct fcp_rsp); 8008 /* word4 iocb=parameter wqe=total_xfer_length memcpy */ 8009 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */ 8010 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com, 8011 iocbq->iocb.ulpFCP2Rcvy); 8012 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS); 8013 /* Always open the exchange */ 8014 bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0); 8015 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ); 8016 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, 8017 LPFC_WQE_LENLOC_WORD4); 8018 bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0); 8019 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU); 8020 if (iocbq->iocb_flag & LPFC_IO_DIF) { 8021 iocbq->iocb_flag &= ~LPFC_IO_DIF; 8022 bf_set(wqe_dif, &wqe->generic.wqe_com, 1); 8023 } 8024 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1); 8025 break; 8026 case CMD_FCP_ICMND64_CR: 8027 /* word3 iocb=IO_TAG wqe=reserved */ 8028 wqe->fcp_icmd.rsrvd3 = 0; 8029 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0); 8030 /* Always open the exchange */ 8031 bf_set(wqe_xc, &wqe->fcp_icmd.wqe_com, 0); 8032 bf_set(wqe_dbde, 
&wqe->fcp_icmd.wqe_com, 1); 8033 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE); 8034 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1); 8035 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, 8036 LPFC_WQE_LENLOC_NONE); 8037 bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0); 8038 break; 8039 case CMD_GEN_REQUEST64_CR: 8040 /* For this command calculate the xmit length of the 8041 * request bde. 8042 */ 8043 xmit_len = 0; 8044 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / 8045 sizeof(struct ulp_bde64); 8046 for (i = 0; i < numBdes; i++) { 8047 bde.tus.w = le32_to_cpu(bpl[i].tus.w); 8048 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64) 8049 break; 8050 xmit_len += bde.tus.f.bdeSize; 8051 } 8052 /* word3 iocb=IO_TAG wqe=request_payload_len */ 8053 wqe->gen_req.request_payload_len = xmit_len; 8054 /* word4 iocb=parameter wqe=relative_offset memcpy */ 8055 /* word5 [rctl, type, df_ctl, la] copied in memcpy */ 8056 /* word6 context tag copied in memcpy */ 8057 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) { 8058 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); 8059 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8060 "2015 Invalid CT %x command 0x%x\n", 8061 ct, iocbq->iocb.ulpCommand); 8062 return IOCB_ERROR; 8063 } 8064 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0); 8065 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout); 8066 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU); 8067 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1); 8068 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ); 8069 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1); 8070 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE); 8071 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0); 8072 command_type = OTHER_COMMAND; 8073 break; 8074 case CMD_XMIT_ELS_RSP64_CX: 8075 ndlp = (struct lpfc_nodelist *)iocbq->context1; 8076 /* words0-2 BDE memcpy */ 8077 /* word3 iocb=iotag32 wqe=response_payload_len */ 8078 wqe->xmit_els_rsp.response_payload_len = xmit_len; 8079 /* word4 */ 8080 wqe->xmit_els_rsp.word4 = 0; 8081 /* word5 iocb=rsvd wge=did */ 8082 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest, 8083 iocbq->iocb.un.xseq64.xmit_els_remoteID); 8084 8085 if_type = bf_get(lpfc_sli_intf_if_type, 8086 &phba->sli4_hba.sli_intf); 8087 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) { 8088 if (iocbq->vport->fc_flag & FC_PT2PT) { 8089 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1); 8090 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, 8091 iocbq->vport->fc_myDID); 8092 if (iocbq->vport->fc_myDID == Fabric_DID) { 8093 bf_set(wqe_els_did, 8094 &wqe->xmit_els_rsp.wqe_dest, 0); 8095 } 8096 } 8097 } 8098 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 8099 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 8100 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU); 8101 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 8102 iocbq->iocb.unsli3.rcvsli3.ox_id); 8103 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l) 8104 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 8105 phba->vpi_ids[iocbq->vport->vpi]); 8106 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1); 8107 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE); 8108 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1); 8109 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com, 8110 LPFC_WQE_LENLOC_WORD3); 8111 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0); 8112 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp, 8113 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 8114 pcmd = (uint32_t *) (((struct lpfc_dmabuf *) 8115 iocbq->context2)->virt); 8116 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 8117 
bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1); 8118 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, 8119 iocbq->vport->fc_myDID); 8120 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1); 8121 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 8122 phba->vpi_ids[phba->pport->vpi]); 8123 } 8124 command_type = OTHER_COMMAND; 8125 break; 8126 case CMD_CLOSE_XRI_CN: 8127 case CMD_ABORT_XRI_CN: 8128 case CMD_ABORT_XRI_CX: 8129 /* words 0-2 memcpy should be 0 rserved */ 8130 /* port will send abts */ 8131 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag; 8132 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) { 8133 abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag]; 8134 fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK; 8135 } else 8136 fip = 0; 8137 8138 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip) 8139 /* 8140 * The link is down, or the command was ELS_FIP 8141 * so the fw does not need to send abts 8142 * on the wire. 8143 */ 8144 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1); 8145 else 8146 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0); 8147 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG); 8148 /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */ 8149 wqe->abort_cmd.rsrvd5 = 0; 8150 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com, 8151 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 8152 abort_tag = iocbq->iocb.un.acxri.abortIoTag; 8153 /* 8154 * The abort handler will send us CMD_ABORT_XRI_CN or 8155 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX 8156 */ 8157 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX); 8158 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1); 8159 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com, 8160 LPFC_WQE_LENLOC_NONE); 8161 cmnd = CMD_ABORT_XRI_CX; 8162 command_type = OTHER_COMMAND; 8163 xritag = 0; 8164 break; 8165 case CMD_XMIT_BLS_RSP64_CX: 8166 ndlp = (struct lpfc_nodelist *)iocbq->context1; 8167 /* As BLS ABTS RSP WQE is very different from other WQEs, 8168 * we re-construct this WQE here based on information in 8169 * iocbq from scratch. 8170 */ 8171 memset(wqe, 0, sizeof(union lpfc_wqe)); 8172 /* OX_ID is invariable to who sent ABTS to CT exchange */ 8173 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp, 8174 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp)); 8175 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) == 8176 LPFC_ABTS_UNSOL_INT) { 8177 /* ABTS sent by initiator to CT exchange, the 8178 * RX_ID field will be filled with the newly 8179 * allocated responder XRI. 8180 */ 8181 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp, 8182 iocbq->sli4_xritag); 8183 } else { 8184 /* ABTS sent by responder to CT exchange, the 8185 * RX_ID field will be filled with the responder 8186 * RX_ID from ABTS. 
8187 */ 8188 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp, 8189 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp)); 8190 } 8191 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff); 8192 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1); 8193 8194 /* Use CT=VPI */ 8195 bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest, 8196 ndlp->nlp_DID); 8197 bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp, 8198 iocbq->iocb.ulpContext); 8199 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1); 8200 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com, 8201 phba->vpi_ids[phba->pport->vpi]); 8202 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1); 8203 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com, 8204 LPFC_WQE_LENLOC_NONE); 8205 /* Overwrite the pre-set comnd type with OTHER_COMMAND */ 8206 command_type = OTHER_COMMAND; 8207 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) { 8208 bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp, 8209 bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp)); 8210 bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp, 8211 bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp)); 8212 bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp, 8213 bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp)); 8214 } 8215 8216 break; 8217 case CMD_XRI_ABORTED_CX: 8218 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */ 8219 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */ 8220 case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */ 8221 case CMD_FCP_TRSP64_CX: /* Target mode rcv */ 8222 case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */ 8223 default: 8224 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8225 "2014 Invalid command 0x%x\n", 8226 iocbq->iocb.ulpCommand); 8227 return IOCB_ERROR; 8228 break; 8229 } 8230 8231 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag); 8232 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag); 8233 wqe->generic.wqe_com.abort_tag = abort_tag; 8234 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type); 8235 bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd); 8236 bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass); 8237 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); 8238 return 0; 8239 } 8240 8241 /** 8242 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb 8243 * @phba: Pointer to HBA context object. 8244 * @ring_number: SLI ring number to issue iocb on. 8245 * @piocb: Pointer to command iocb. 8246 * @flag: Flag indicating if this command can be put into txq. 8247 * 8248 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue 8249 * an iocb command to an HBA with SLI-4 interface spec. 8250 * 8251 * This function is called with hbalock held. The function will return success 8252 * after it successfully submit the iocb to firmware or after adding to the 8253 * txq. 
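 *
 * A non-FCP iocb that does not already own an XRI is assigned an sglq here;
 * if the txq is not empty or no sglq is available, the iocb is queued to
 * the txq (or IOCB_BUSY is returned when SLI_IOCB_RET_IOCB is set). FCP
 * iocbs already carry a mapped sgl and are distributed across the
 * fast-path FCP WQs; all other iocbs are posted to the ELS WQ.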
8254 **/ 8255 static int 8256 __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, 8257 struct lpfc_iocbq *piocb, uint32_t flag) 8258 { 8259 struct lpfc_sglq *sglq; 8260 union lpfc_wqe wqe; 8261 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number]; 8262 8263 if (piocb->sli4_xritag == NO_XRI) { 8264 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || 8265 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN) 8266 sglq = NULL; 8267 else { 8268 if (pring->txq_cnt) { 8269 if (!(flag & SLI_IOCB_RET_IOCB)) { 8270 __lpfc_sli_ringtx_put(phba, 8271 pring, piocb); 8272 return IOCB_SUCCESS; 8273 } else { 8274 return IOCB_BUSY; 8275 } 8276 } else { 8277 sglq = __lpfc_sli_get_sglq(phba, piocb); 8278 if (!sglq) { 8279 if (!(flag & SLI_IOCB_RET_IOCB)) { 8280 __lpfc_sli_ringtx_put(phba, 8281 pring, 8282 piocb); 8283 return IOCB_SUCCESS; 8284 } else 8285 return IOCB_BUSY; 8286 } 8287 } 8288 } 8289 } else if (piocb->iocb_flag & LPFC_IO_FCP) { 8290 /* These IO's already have an XRI and a mapped sgl. */ 8291 sglq = NULL; 8292 } else { 8293 /* 8294 * This is a continuation of a commandi,(CX) so this 8295 * sglq is on the active list 8296 */ 8297 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_xritag); 8298 if (!sglq) 8299 return IOCB_ERROR; 8300 } 8301 8302 if (sglq) { 8303 piocb->sli4_lxritag = sglq->sli4_lxritag; 8304 piocb->sli4_xritag = sglq->sli4_xritag; 8305 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq)) 8306 return IOCB_ERROR; 8307 } 8308 8309 if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe)) 8310 return IOCB_ERROR; 8311 8312 if ((piocb->iocb_flag & LPFC_IO_FCP) || 8313 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) { 8314 /* 8315 * For FCP command IOCB, get a new WQ index to distribute 8316 * WQE across the WQsr. On the other hand, for abort IOCB, 8317 * it carries the same WQ index to the original command 8318 * IOCB. 8319 */ 8320 if (piocb->iocb_flag & LPFC_IO_FCP) 8321 piocb->fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba); 8322 if (unlikely(!phba->sli4_hba.fcp_wq)) 8323 return IOCB_ERROR; 8324 if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx], 8325 &wqe)) 8326 return IOCB_ERROR; 8327 } else { 8328 if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe)) 8329 return IOCB_ERROR; 8330 } 8331 lpfc_sli_ringtxcmpl_put(phba, pring, piocb); 8332 8333 return 0; 8334 } 8335 8336 /** 8337 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb 8338 * 8339 * This routine wraps the actual lockless version for issusing IOCB function 8340 * pointer from the lpfc_hba struct. 8341 * 8342 * Return codes: 8343 * IOCB_ERROR - Error 8344 * IOCB_SUCCESS - Success 8345 * IOCB_BUSY - Busy 8346 **/ 8347 int 8348 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, 8349 struct lpfc_iocbq *piocb, uint32_t flag) 8350 { 8351 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); 8352 } 8353 8354 /** 8355 * lpfc_sli_api_table_setup - Set up sli api function jump table 8356 * @phba: The hba struct for which this call is being executed. 8357 * @dev_grp: The HBA PCI-Device group number. 8358 * 8359 * This routine sets up the SLI interface API function jump table in @phba 8360 * struct. 8361 * Returns: 0 - success, -ENODEV - failure. 
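 *
 * LPFC_PCI_DEV_LP selects the SLI-3 (_s3) issue/release handlers and
 * LPFC_PCI_DEV_OC selects the SLI-4 (_s4) handlers.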
8362 **/ 8363 int 8364 lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 8365 { 8366 8367 switch (dev_grp) { 8368 case LPFC_PCI_DEV_LP: 8369 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3; 8370 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3; 8371 break; 8372 case LPFC_PCI_DEV_OC: 8373 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4; 8374 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4; 8375 break; 8376 default: 8377 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8378 "1419 Invalid HBA PCI-device group: 0x%x\n", 8379 dev_grp); 8380 return -ENODEV; 8381 break; 8382 } 8383 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq; 8384 return 0; 8385 } 8386 8387 /** 8388 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb 8389 * @phba: Pointer to HBA context object. 8390 * @pring: Pointer to driver SLI ring object. 8391 * @piocb: Pointer to command iocb. 8392 * @flag: Flag indicating if this command can be put into txq. 8393 * 8394 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb 8395 * function. This function gets the hbalock and calls 8396 * __lpfc_sli_issue_iocb function and will return the error returned 8397 * by __lpfc_sli_issue_iocb function. This wrapper is used by 8398 * functions which do not hold hbalock. 8399 **/ 8400 int 8401 lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, 8402 struct lpfc_iocbq *piocb, uint32_t flag) 8403 { 8404 unsigned long iflags; 8405 int rc; 8406 8407 spin_lock_irqsave(&phba->hbalock, iflags); 8408 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); 8409 spin_unlock_irqrestore(&phba->hbalock, iflags); 8410 8411 return rc; 8412 } 8413 8414 /** 8415 * lpfc_extra_ring_setup - Extra ring setup function 8416 * @phba: Pointer to HBA context object. 8417 * 8418 * This function is called while driver attaches with the 8419 * HBA to setup the extra ring. The extra ring is used 8420 * only when driver needs to support target mode functionality 8421 * or IP over FC functionalities. 8422 * 8423 * This function is called with no lock held. 8424 **/ 8425 static int 8426 lpfc_extra_ring_setup( struct lpfc_hba *phba) 8427 { 8428 struct lpfc_sli *psli; 8429 struct lpfc_sli_ring *pring; 8430 8431 psli = &phba->sli; 8432 8433 /* Adjust cmd/rsp ring iocb entries more evenly */ 8434 8435 /* Take some away from the FCP ring */ 8436 pring = &psli->ring[psli->fcp_ring]; 8437 pring->numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES; 8438 pring->numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES; 8439 pring->numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES; 8440 pring->numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES; 8441 8442 /* and give them to the extra ring */ 8443 pring = &psli->ring[psli->extra_ring]; 8444 8445 pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES; 8446 pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES; 8447 pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES; 8448 pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES; 8449 8450 /* Setup default profile for this ring */ 8451 pring->iotag_max = 4096; 8452 pring->num_mask = 1; 8453 pring->prt[0].profile = 0; /* Mask 0 */ 8454 pring->prt[0].rctl = phba->cfg_multi_ring_rctl; 8455 pring->prt[0].type = phba->cfg_multi_ring_type; 8456 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL; 8457 return 0; 8458 } 8459 8460 /* lpfc_sli_abts_recover_port - Recover a port that failed an ABTS. 8461 * @vport: pointer to virtual port object. 8462 * @ndlp: nodelist pointer for the impacted rport. 
8463 * 8464 * The driver calls this routine in response to a XRI ABORT CQE 8465 * event from the port. In this event, the driver is required to 8466 * recover its login to the rport even though its login may be valid 8467 * from the driver's perspective. The failed ABTS notice from the 8468 * port indicates the rport is not responding. 8469 */ 8470 static void 8471 lpfc_sli_abts_recover_port(struct lpfc_vport *vport, 8472 struct lpfc_nodelist *ndlp) 8473 { 8474 struct Scsi_Host *shost; 8475 struct lpfc_hba *phba; 8476 unsigned long flags = 0; 8477 8478 shost = lpfc_shost_from_vport(vport); 8479 phba = vport->phba; 8480 if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) { 8481 lpfc_printf_log(phba, KERN_INFO, 8482 LOG_SLI, "3093 No rport recovery needed. " 8483 "rport in state 0x%x\n", 8484 ndlp->nlp_state); 8485 return; 8486 } 8487 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8488 "3094 Start rport recovery on shost id 0x%x " 8489 "fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x " 8490 "flags 0x%x\n", 8491 shost->host_no, ndlp->nlp_DID, 8492 vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state, 8493 ndlp->nlp_flag); 8494 /* 8495 * The rport is not responding. Don't attempt ADISC recovery. 8496 * Remove the FCP-2 flag to force a PLOGI. 8497 */ 8498 spin_lock_irqsave(shost->host_lock, flags); 8499 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; 8500 spin_unlock_irqrestore(shost->host_lock, flags); 8501 lpfc_disc_state_machine(vport, ndlp, NULL, 8502 NLP_EVT_DEVICE_RECOVERY); 8503 lpfc_cancel_retry_delay_tmo(vport, ndlp); 8504 spin_lock_irqsave(shost->host_lock, flags); 8505 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 8506 spin_unlock_irqrestore(shost->host_lock, flags); 8507 lpfc_disc_start(vport); 8508 } 8509 8510 /* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port. 8511 * @phba: Pointer to HBA context object. 8512 * @iocbq: Pointer to iocb object. 8513 * 8514 * The async_event handler calls this routine when it receives 8515 * an ASYNC_STATUS_CN event from the port. The port generates 8516 * this event when an Abort Sequence request to an rport fails 8517 * twice in succession. The abort could be originated by the 8518 * driver or by the port. The ABTS could have been for an ELS 8519 * or FCP IO. The port only generates this event when an ABTS 8520 * fails to complete after one retry. 8521 */ 8522 static void 8523 lpfc_sli_abts_err_handler(struct lpfc_hba *phba, 8524 struct lpfc_iocbq *iocbq) 8525 { 8526 struct lpfc_nodelist *ndlp = NULL; 8527 uint16_t rpi = 0, vpi = 0; 8528 struct lpfc_vport *vport = NULL; 8529 8530 /* The rpi in the ulpContext is vport-sensitive. */ 8531 vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag; 8532 rpi = iocbq->iocb.ulpContext; 8533 8534 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 8535 "3092 Port generated ABTS async event " 8536 "on vpi %d rpi %d status 0x%x\n", 8537 vpi, rpi, iocbq->iocb.ulpStatus); 8538 8539 vport = lpfc_find_vport_by_vpid(phba, vpi); 8540 if (!vport) 8541 goto err_exit; 8542 ndlp = lpfc_findnode_rpi(vport, rpi); 8543 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) 8544 goto err_exit; 8545 8546 if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT) 8547 lpfc_sli_abts_recover_port(vport, ndlp); 8548 return; 8549 8550 err_exit: 8551 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 8552 "3095 Event Context not found, no " 8553 "action on vpi %d rpi %d status 0x%x, reason 0x%x\n", 8554 iocbq->iocb.ulpContext, iocbq->iocb.ulpStatus, 8555 vpi, rpi); 8556 } 8557 8558 /* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port. 8559 * @phba: pointer to HBA context object. 
8560 * @ndlp: nodelist pointer for the impacted rport. 8561 * @axri: pointer to the wcqe containing the failed exchange. 8562 * 8563 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the 8564 * port. The port generates this event when an abort exchange request to an 8565 * rport fails twice in succession with no reply. The abort could be originated 8566 * by the driver or by the port. The ABTS could have been for an ELS or FCP IO. 8567 */ 8568 void 8569 lpfc_sli4_abts_err_handler(struct lpfc_hba *phba, 8570 struct lpfc_nodelist *ndlp, 8571 struct sli4_wcqe_xri_aborted *axri) 8572 { 8573 struct lpfc_vport *vport; 8574 uint32_t ext_status = 0; 8575 8576 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { 8577 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 8578 "3115 Node Context not found, driver " 8579 "ignoring abts err event\n"); 8580 return; 8581 } 8582 8583 vport = ndlp->vport; 8584 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 8585 "3116 Port generated FCP XRI ABORT event on " 8586 "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n", 8587 ndlp->vport->vpi, ndlp->nlp_rpi, 8588 bf_get(lpfc_wcqe_xa_xri, axri), 8589 bf_get(lpfc_wcqe_xa_status, axri), 8590 axri->parameter); 8591 8592 /* 8593 * Catch the ABTS protocol failure case. Older OCe FW releases returned 8594 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and 8595 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT. 8596 */ 8597 ext_status = axri->parameter & WCQE_PARAM_MASK; 8598 if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) && 8599 ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0))) 8600 lpfc_sli_abts_recover_port(vport, ndlp); 8601 } 8602 8603 /** 8604 * lpfc_sli_async_event_handler - ASYNC iocb handler function 8605 * @phba: Pointer to HBA context object. 8606 * @pring: Pointer to driver SLI ring object. 8607 * @iocbq: Pointer to iocb object. 8608 * 8609 * This function is called by the slow ring event handler 8610 * function when there is an ASYNC event iocb in the ring. 8611 * This function is called with no lock held. 8612 * Currently this function handles only temperature related 8613 * ASYNC events. The function decodes the temperature sensor 8614 * event message and posts events for the management applications. 8615 **/ 8616 static void 8617 lpfc_sli_async_event_handler(struct lpfc_hba * phba, 8618 struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq) 8619 { 8620 IOCB_t *icmd; 8621 uint16_t evt_code; 8622 struct temp_event temp_event_data; 8623 struct Scsi_Host *shost; 8624 uint32_t *iocb_w; 8625 8626 icmd = &iocbq->iocb; 8627 evt_code = icmd->un.asyncstat.evt_code; 8628 8629 switch (evt_code) { 8630 case ASYNC_TEMP_WARN: 8631 case ASYNC_TEMP_SAFE: 8632 temp_event_data.data = (uint32_t) icmd->ulpContext; 8633 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 8634 if (evt_code == ASYNC_TEMP_WARN) { 8635 temp_event_data.event_code = LPFC_THRESHOLD_TEMP; 8636 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP, 8637 "0347 Adapter is very hot, please take " 8638 "corrective action. temperature : %d Celsius\n", 8639 (uint32_t) icmd->ulpContext); 8640 } else { 8641 temp_event_data.event_code = LPFC_NORMAL_TEMP; 8642 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP, 8643 "0340 Adapter temperature is OK now. 
" 8644 "temperature : %d Celsius\n", 8645 (uint32_t) icmd->ulpContext); 8646 } 8647 8648 /* Send temperature change event to applications */ 8649 shost = lpfc_shost_from_vport(phba->pport); 8650 fc_host_post_vendor_event(shost, fc_get_event_number(), 8651 sizeof(temp_event_data), (char *) &temp_event_data, 8652 LPFC_NL_VENDOR_ID); 8653 break; 8654 case ASYNC_STATUS_CN: 8655 lpfc_sli_abts_err_handler(phba, iocbq); 8656 break; 8657 default: 8658 iocb_w = (uint32_t *) icmd; 8659 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8660 "0346 Ring %d handler: unexpected ASYNC_STATUS" 8661 " evt_code 0x%x\n" 8662 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n" 8663 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n" 8664 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n" 8665 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n", 8666 pring->ringno, icmd->un.asyncstat.evt_code, 8667 iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3], 8668 iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7], 8669 iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11], 8670 iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]); 8671 8672 break; 8673 } 8674 } 8675 8676 8677 /** 8678 * lpfc_sli_setup - SLI ring setup function 8679 * @phba: Pointer to HBA context object. 8680 * 8681 * lpfc_sli_setup sets up rings of the SLI interface with 8682 * number of iocbs per ring and iotags. This function is 8683 * called while driver attach to the HBA and before the 8684 * interrupts are enabled. So there is no need for locking. 8685 * 8686 * This function always returns 0. 8687 **/ 8688 int 8689 lpfc_sli_setup(struct lpfc_hba *phba) 8690 { 8691 int i, totiocbsize = 0; 8692 struct lpfc_sli *psli = &phba->sli; 8693 struct lpfc_sli_ring *pring; 8694 8695 psli->num_rings = MAX_CONFIGURED_RINGS; 8696 psli->sli_flag = 0; 8697 psli->fcp_ring = LPFC_FCP_RING; 8698 psli->next_ring = LPFC_FCP_NEXT_RING; 8699 psli->extra_ring = LPFC_EXTRA_RING; 8700 8701 psli->iocbq_lookup = NULL; 8702 psli->iocbq_lookup_len = 0; 8703 psli->last_iotag = 0; 8704 8705 for (i = 0; i < psli->num_rings; i++) { 8706 pring = &psli->ring[i]; 8707 switch (i) { 8708 case LPFC_FCP_RING: /* ring 0 - FCP */ 8709 /* numCiocb and numRiocb are used in config_port */ 8710 pring->numCiocb = SLI2_IOCB_CMD_R0_ENTRIES; 8711 pring->numRiocb = SLI2_IOCB_RSP_R0_ENTRIES; 8712 pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES; 8713 pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES; 8714 pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES; 8715 pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES; 8716 pring->sizeCiocb = (phba->sli_rev == 3) ? 8717 SLI3_IOCB_CMD_SIZE : 8718 SLI2_IOCB_CMD_SIZE; 8719 pring->sizeRiocb = (phba->sli_rev == 3) ? 8720 SLI3_IOCB_RSP_SIZE : 8721 SLI2_IOCB_RSP_SIZE; 8722 pring->iotag_ctr = 0; 8723 pring->iotag_max = 8724 (phba->cfg_hba_queue_depth * 2); 8725 pring->fast_iotag = pring->iotag_max; 8726 pring->num_mask = 0; 8727 break; 8728 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */ 8729 /* numCiocb and numRiocb are used in config_port */ 8730 pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES; 8731 pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES; 8732 pring->sizeCiocb = (phba->sli_rev == 3) ? 8733 SLI3_IOCB_CMD_SIZE : 8734 SLI2_IOCB_CMD_SIZE; 8735 pring->sizeRiocb = (phba->sli_rev == 3) ? 
8736 SLI3_IOCB_RSP_SIZE : 8737 SLI2_IOCB_RSP_SIZE; 8738 pring->iotag_max = phba->cfg_hba_queue_depth; 8739 pring->num_mask = 0; 8740 break; 8741 case LPFC_ELS_RING: /* ring 2 - ELS / CT */ 8742 /* numCiocb and numRiocb are used in config_port */ 8743 pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES; 8744 pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES; 8745 pring->sizeCiocb = (phba->sli_rev == 3) ? 8746 SLI3_IOCB_CMD_SIZE : 8747 SLI2_IOCB_CMD_SIZE; 8748 pring->sizeRiocb = (phba->sli_rev == 3) ? 8749 SLI3_IOCB_RSP_SIZE : 8750 SLI2_IOCB_RSP_SIZE; 8751 pring->fast_iotag = 0; 8752 pring->iotag_ctr = 0; 8753 pring->iotag_max = 4096; 8754 pring->lpfc_sli_rcv_async_status = 8755 lpfc_sli_async_event_handler; 8756 pring->num_mask = LPFC_MAX_RING_MASK; 8757 pring->prt[0].profile = 0; /* Mask 0 */ 8758 pring->prt[0].rctl = FC_RCTL_ELS_REQ; 8759 pring->prt[0].type = FC_TYPE_ELS; 8760 pring->prt[0].lpfc_sli_rcv_unsol_event = 8761 lpfc_els_unsol_event; 8762 pring->prt[1].profile = 0; /* Mask 1 */ 8763 pring->prt[1].rctl = FC_RCTL_ELS_REP; 8764 pring->prt[1].type = FC_TYPE_ELS; 8765 pring->prt[1].lpfc_sli_rcv_unsol_event = 8766 lpfc_els_unsol_event; 8767 pring->prt[2].profile = 0; /* Mask 2 */ 8768 /* NameServer Inquiry */ 8769 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL; 8770 /* NameServer */ 8771 pring->prt[2].type = FC_TYPE_CT; 8772 pring->prt[2].lpfc_sli_rcv_unsol_event = 8773 lpfc_ct_unsol_event; 8774 pring->prt[3].profile = 0; /* Mask 3 */ 8775 /* NameServer response */ 8776 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL; 8777 /* NameServer */ 8778 pring->prt[3].type = FC_TYPE_CT; 8779 pring->prt[3].lpfc_sli_rcv_unsol_event = 8780 lpfc_ct_unsol_event; 8781 /* abort unsolicited sequence */ 8782 pring->prt[4].profile = 0; /* Mask 4 */ 8783 pring->prt[4].rctl = FC_RCTL_BA_ABTS; 8784 pring->prt[4].type = FC_TYPE_BLS; 8785 pring->prt[4].lpfc_sli_rcv_unsol_event = 8786 lpfc_sli4_ct_abort_unsol_event; 8787 break; 8788 } 8789 totiocbsize += (pring->numCiocb * pring->sizeCiocb) + 8790 (pring->numRiocb * pring->sizeRiocb); 8791 } 8792 if (totiocbsize > MAX_SLIM_IOCB_SIZE) { 8793 /* Too many cmd / rsp ring entries in SLI2 SLIM */ 8794 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in " 8795 "SLI2 SLIM Data: x%x x%lx\n", 8796 phba->brd_no, totiocbsize, 8797 (unsigned long) MAX_SLIM_IOCB_SIZE); 8798 } 8799 if (phba->cfg_multi_ring_support == 2) 8800 lpfc_extra_ring_setup(phba); 8801 8802 return 0; 8803 } 8804 8805 /** 8806 * lpfc_sli_queue_setup - Queue initialization function 8807 * @phba: Pointer to HBA context object. 8808 * 8809 * lpfc_sli_queue_setup sets up mailbox queues and iocb queues for each 8810 * ring. This function also initializes ring indices of each ring. 8811 * This function is called during the initialization of the SLI 8812 * interface of an HBA. 8813 * This function is called with no lock held and always returns 8814 * 1. 
8815 **/ 8816 int 8817 lpfc_sli_queue_setup(struct lpfc_hba *phba) 8818 { 8819 struct lpfc_sli *psli; 8820 struct lpfc_sli_ring *pring; 8821 int i; 8822 8823 psli = &phba->sli; 8824 spin_lock_irq(&phba->hbalock); 8825 INIT_LIST_HEAD(&psli->mboxq); 8826 INIT_LIST_HEAD(&psli->mboxq_cmpl); 8827 /* Initialize list headers for txq and txcmplq as double linked lists */ 8828 for (i = 0; i < psli->num_rings; i++) { 8829 pring = &psli->ring[i]; 8830 pring->ringno = i; 8831 pring->next_cmdidx = 0; 8832 pring->local_getidx = 0; 8833 pring->cmdidx = 0; 8834 INIT_LIST_HEAD(&pring->txq); 8835 INIT_LIST_HEAD(&pring->txcmplq); 8836 INIT_LIST_HEAD(&pring->iocb_continueq); 8837 INIT_LIST_HEAD(&pring->iocb_continue_saveq); 8838 INIT_LIST_HEAD(&pring->postbufq); 8839 } 8840 spin_unlock_irq(&phba->hbalock); 8841 return 1; 8842 } 8843 8844 /** 8845 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system 8846 * @phba: Pointer to HBA context object. 8847 * 8848 * This routine flushes the mailbox command subsystem. It will unconditionally 8849 * flush all the mailbox commands in the three possible stages in the mailbox 8850 * command sub-system: pending mailbox command queue; the outstanding mailbox 8851 * command; and completed mailbox command queue. It is caller's responsibility 8852 * to make sure that the driver is in the proper state to flush the mailbox 8853 * command sub-system. Namely, the posting of mailbox commands into the 8854 * pending mailbox command queue from the various clients must be stopped; 8855 * either the HBA is in a state that it will never works on the outstanding 8856 * mailbox command (such as in EEH or ERATT conditions) or the outstanding 8857 * mailbox command has been completed. 8858 **/ 8859 static void 8860 lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba) 8861 { 8862 LIST_HEAD(completions); 8863 struct lpfc_sli *psli = &phba->sli; 8864 LPFC_MBOXQ_t *pmb; 8865 unsigned long iflag; 8866 8867 /* Flush all the mailbox commands in the mbox system */ 8868 spin_lock_irqsave(&phba->hbalock, iflag); 8869 /* The pending mailbox command queue */ 8870 list_splice_init(&phba->sli.mboxq, &completions); 8871 /* The outstanding active mailbox command */ 8872 if (psli->mbox_active) { 8873 list_add_tail(&psli->mbox_active->list, &completions); 8874 psli->mbox_active = NULL; 8875 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8876 } 8877 /* The completed mailbox command queue */ 8878 list_splice_init(&phba->sli.mboxq_cmpl, &completions); 8879 spin_unlock_irqrestore(&phba->hbalock, iflag); 8880 8881 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */ 8882 while (!list_empty(&completions)) { 8883 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list); 8884 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED; 8885 if (pmb->mbox_cmpl) 8886 pmb->mbox_cmpl(phba, pmb); 8887 } 8888 } 8889 8890 /** 8891 * lpfc_sli_host_down - Vport cleanup function 8892 * @vport: Pointer to virtual port object. 8893 * 8894 * lpfc_sli_host_down is called to clean up the resources 8895 * associated with a vport before destroying virtual 8896 * port data structures. 8897 * This function does following operations: 8898 * - Free discovery resources associated with this virtual 8899 * port. 8900 * - Free iocbs associated with this virtual port in 8901 * the txq. 8902 * - Send abort for all iocb commands associated with this 8903 * vport in txcmplq. 8904 * 8905 * This function is called with no lock held and always returns 1. 
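 *
 * Every iocb collected off the txq here is completed back to its owner
 * with IOSTAT_LOCAL_REJECT / IOERR_SLI_DOWN through lpfc_sli_cancel_iocbs().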
8906 **/ 8907 int 8908 lpfc_sli_host_down(struct lpfc_vport *vport) 8909 { 8910 LIST_HEAD(completions); 8911 struct lpfc_hba *phba = vport->phba; 8912 struct lpfc_sli *psli = &phba->sli; 8913 struct lpfc_sli_ring *pring; 8914 struct lpfc_iocbq *iocb, *next_iocb; 8915 int i; 8916 unsigned long flags = 0; 8917 uint16_t prev_pring_flag; 8918 8919 lpfc_cleanup_discovery_resources(vport); 8920 8921 spin_lock_irqsave(&phba->hbalock, flags); 8922 for (i = 0; i < psli->num_rings; i++) { 8923 pring = &psli->ring[i]; 8924 prev_pring_flag = pring->flag; 8925 /* Only slow rings */ 8926 if (pring->ringno == LPFC_ELS_RING) { 8927 pring->flag |= LPFC_DEFERRED_RING_EVENT; 8928 /* Set the lpfc data pending flag */ 8929 set_bit(LPFC_DATA_READY, &phba->data_flags); 8930 } 8931 /* 8932 * Error everything on the txq since these iocbs have not been 8933 * given to the FW yet. 8934 */ 8935 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { 8936 if (iocb->vport != vport) 8937 continue; 8938 list_move_tail(&iocb->list, &completions); 8939 pring->txq_cnt--; 8940 } 8941 8942 /* Next issue ABTS for everything on the txcmplq */ 8943 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, 8944 list) { 8945 if (iocb->vport != vport) 8946 continue; 8947 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 8948 } 8949 8950 pring->flag = prev_pring_flag; 8951 } 8952 8953 spin_unlock_irqrestore(&phba->hbalock, flags); 8954 8955 /* Cancel all the IOCBs from the completions list */ 8956 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 8957 IOERR_SLI_DOWN); 8958 return 1; 8959 } 8960 8961 /** 8962 * lpfc_sli_hba_down - Resource cleanup function for the HBA 8963 * @phba: Pointer to HBA context object. 8964 * 8965 * This function cleans up all iocb, buffers, mailbox commands 8966 * while shutting down the HBA. This function is called with no 8967 * lock held and always returns 1. 8968 * This function does the following to cleanup driver resources: 8969 * - Free discovery resources for each virtual port 8970 * - Cleanup any pending fabric iocbs 8971 * - Iterate through the iocb txq and free each entry 8972 * in the list. 8973 * - Free up any buffer posted to the HBA 8974 * - Free mailbox commands in the mailbox queue. 8975 **/ 8976 int 8977 lpfc_sli_hba_down(struct lpfc_hba *phba) 8978 { 8979 LIST_HEAD(completions); 8980 struct lpfc_sli *psli = &phba->sli; 8981 struct lpfc_sli_ring *pring; 8982 struct lpfc_dmabuf *buf_ptr; 8983 unsigned long flags = 0; 8984 int i; 8985 8986 /* Shutdown the mailbox command sub-system */ 8987 lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT); 8988 8989 lpfc_hba_down_prep(phba); 8990 8991 lpfc_fabric_abort_hba(phba); 8992 8993 spin_lock_irqsave(&phba->hbalock, flags); 8994 for (i = 0; i < psli->num_rings; i++) { 8995 pring = &psli->ring[i]; 8996 /* Only slow rings */ 8997 if (pring->ringno == LPFC_ELS_RING) { 8998 pring->flag |= LPFC_DEFERRED_RING_EVENT; 8999 /* Set the lpfc data pending flag */ 9000 set_bit(LPFC_DATA_READY, &phba->data_flags); 9001 } 9002 9003 /* 9004 * Error everything on the txq since these iocbs have not been 9005 * given to the FW yet. 
9006 */ 9007 list_splice_init(&pring->txq, &completions); 9008 pring->txq_cnt = 0; 9009 9010 } 9011 spin_unlock_irqrestore(&phba->hbalock, flags); 9012 9013 /* Cancel all the IOCBs from the completions list */ 9014 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 9015 IOERR_SLI_DOWN); 9016 9017 spin_lock_irqsave(&phba->hbalock, flags); 9018 list_splice_init(&phba->elsbuf, &completions); 9019 phba->elsbuf_cnt = 0; 9020 phba->elsbuf_prev_cnt = 0; 9021 spin_unlock_irqrestore(&phba->hbalock, flags); 9022 9023 while (!list_empty(&completions)) { 9024 list_remove_head(&completions, buf_ptr, 9025 struct lpfc_dmabuf, list); 9026 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 9027 kfree(buf_ptr); 9028 } 9029 9030 /* Return any active mbox cmds */ 9031 del_timer_sync(&psli->mbox_tmo); 9032 9033 spin_lock_irqsave(&phba->pport->work_port_lock, flags); 9034 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 9035 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags); 9036 9037 return 1; 9038 } 9039 9040 /** 9041 * lpfc_sli_pcimem_bcopy - SLI memory copy function 9042 * @srcp: Source memory pointer. 9043 * @destp: Destination memory pointer. 9044 * @cnt: Number of words required to be copied. 9045 * 9046 * This function is used for copying data between driver memory 9047 * and the SLI memory. This function also changes the endianness 9048 * of each word if native endianness is different from SLI 9049 * endianness. This function can be called with or without 9050 * lock. 9051 **/ 9052 void 9053 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt) 9054 { 9055 uint32_t *src = srcp; 9056 uint32_t *dest = destp; 9057 uint32_t ldata; 9058 int i; 9059 9060 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) { 9061 ldata = *src; 9062 ldata = le32_to_cpu(ldata); 9063 *dest = ldata; 9064 src++; 9065 dest++; 9066 } 9067 } 9068 9069 9070 /** 9071 * lpfc_sli_bemem_bcopy - SLI memory copy function 9072 * @srcp: Source memory pointer. 9073 * @destp: Destination memory pointer. 9074 * @cnt: Number of words required to be copied. 9075 * 9076 * This function is used for copying data between a data structure 9077 * with big endian representation to local endianness. 9078 * This function can be called with or without lock. 9079 **/ 9080 void 9081 lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt) 9082 { 9083 uint32_t *src = srcp; 9084 uint32_t *dest = destp; 9085 uint32_t ldata; 9086 int i; 9087 9088 for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) { 9089 ldata = *src; 9090 ldata = be32_to_cpu(ldata); 9091 *dest = ldata; 9092 src++; 9093 dest++; 9094 } 9095 } 9096 9097 /** 9098 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq 9099 * @phba: Pointer to HBA context object. 9100 * @pring: Pointer to driver SLI ring object. 9101 * @mp: Pointer to driver buffer object. 9102 * 9103 * This function is called with no lock held. 9104 * It always return zero after adding the buffer to the postbufq 9105 * buffer list. 9106 **/ 9107 int 9108 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 9109 struct lpfc_dmabuf *mp) 9110 { 9111 /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up 9112 later */ 9113 spin_lock_irq(&phba->hbalock); 9114 list_add_tail(&mp->list, &pring->postbufq); 9115 pring->postbufq_cnt++; 9116 spin_unlock_irq(&phba->hbalock); 9117 return 0; 9118 } 9119 9120 /** 9121 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer 9122 * @phba: Pointer to HBA context object. 
9123 * 9124 * When HBQ is enabled, buffers are searched based on tags. This function 9125 * allocates a tag for buffer posted using CMD_QUE_XRI64_CX iocb. The 9126 * tag is bit wise or-ed with QUE_BUFTAG_BIT to make sure that the tag 9127 * does not conflict with tags of buffer posted for unsolicited events. 9128 * The function returns the allocated tag. The function is called with 9129 * no locks held. 9130 **/ 9131 uint32_t 9132 lpfc_sli_get_buffer_tag(struct lpfc_hba *phba) 9133 { 9134 spin_lock_irq(&phba->hbalock); 9135 phba->buffer_tag_count++; 9136 /* 9137 * Always set the QUE_BUFTAG_BIT to distiguish between 9138 * a tag assigned by HBQ. 9139 */ 9140 phba->buffer_tag_count |= QUE_BUFTAG_BIT; 9141 spin_unlock_irq(&phba->hbalock); 9142 return phba->buffer_tag_count; 9143 } 9144 9145 /** 9146 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag 9147 * @phba: Pointer to HBA context object. 9148 * @pring: Pointer to driver SLI ring object. 9149 * @tag: Buffer tag. 9150 * 9151 * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq 9152 * list. After HBA DMA data to these buffers, CMD_IOCB_RET_XRI64_CX 9153 * iocb is posted to the response ring with the tag of the buffer. 9154 * This function searches the pring->postbufq list using the tag 9155 * to find buffer associated with CMD_IOCB_RET_XRI64_CX 9156 * iocb. If the buffer is found then lpfc_dmabuf object of the 9157 * buffer is returned to the caller else NULL is returned. 9158 * This function is called with no lock held. 9159 **/ 9160 struct lpfc_dmabuf * 9161 lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 9162 uint32_t tag) 9163 { 9164 struct lpfc_dmabuf *mp, *next_mp; 9165 struct list_head *slp = &pring->postbufq; 9166 9167 /* Search postbufq, from the beginning, looking for a match on tag */ 9168 spin_lock_irq(&phba->hbalock); 9169 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 9170 if (mp->buffer_tag == tag) { 9171 list_del_init(&mp->list); 9172 pring->postbufq_cnt--; 9173 spin_unlock_irq(&phba->hbalock); 9174 return mp; 9175 } 9176 } 9177 9178 spin_unlock_irq(&phba->hbalock); 9179 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9180 "0402 Cannot find virtual addr for buffer tag on " 9181 "ring %d Data x%lx x%p x%p x%x\n", 9182 pring->ringno, (unsigned long) tag, 9183 slp->next, slp->prev, pring->postbufq_cnt); 9184 9185 return NULL; 9186 } 9187 9188 /** 9189 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events 9190 * @phba: Pointer to HBA context object. 9191 * @pring: Pointer to driver SLI ring object. 9192 * @phys: DMA address of the buffer. 9193 * 9194 * This function searches the buffer list using the dma_address 9195 * of unsolicited event to find the driver's lpfc_dmabuf object 9196 * corresponding to the dma_address. The function returns the 9197 * lpfc_dmabuf object if a buffer is found else it returns NULL. 9198 * This function is called by the ct and els unsolicited event 9199 * handlers to get the buffer associated with the unsolicited 9200 * event. 9201 * 9202 * This function is called with no lock held. 
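 *
 * A minimal usage sketch (illustrative only, not actual driver code; mp is
 * a struct lpfc_dmabuf the caller has already allocated and mapped):
 *
 *	tag = lpfc_sli_get_buffer_tag(phba);
 *	mp->buffer_tag = tag;
 *	lpfc_sli_ringpostbuf_put(phba, pring, mp);
 *	... later, when the CMD_IOCB_RET_XRI64_CX completion arrives ...
 *	mp = lpfc_sli_ring_taggedbuf_get(phba, pring, tag);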
9203 **/ 9204 struct lpfc_dmabuf * 9205 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 9206 dma_addr_t phys) 9207 { 9208 struct lpfc_dmabuf *mp, *next_mp; 9209 struct list_head *slp = &pring->postbufq; 9210 9211 /* Search postbufq, from the beginning, looking for a match on phys */ 9212 spin_lock_irq(&phba->hbalock); 9213 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 9214 if (mp->phys == phys) { 9215 list_del_init(&mp->list); 9216 pring->postbufq_cnt--; 9217 spin_unlock_irq(&phba->hbalock); 9218 return mp; 9219 } 9220 } 9221 9222 spin_unlock_irq(&phba->hbalock); 9223 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9224 "0410 Cannot find virtual addr for mapped buf on " 9225 "ring %d Data x%llx x%p x%p x%x\n", 9226 pring->ringno, (unsigned long long)phys, 9227 slp->next, slp->prev, pring->postbufq_cnt); 9228 return NULL; 9229 } 9230 9231 /** 9232 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs 9233 * @phba: Pointer to HBA context object. 9234 * @cmdiocb: Pointer to driver command iocb object. 9235 * @rspiocb: Pointer to driver response iocb object. 9236 * 9237 * This function is the completion handler for the abort iocbs for 9238 * ELS commands. This function is called from the ELS ring event 9239 * handler with no lock held. This function frees memory resources 9240 * associated with the abort iocb. 9241 **/ 9242 static void 9243 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 9244 struct lpfc_iocbq *rspiocb) 9245 { 9246 IOCB_t *irsp = &rspiocb->iocb; 9247 uint16_t abort_iotag, abort_context; 9248 struct lpfc_iocbq *abort_iocb = NULL; 9249 9250 if (irsp->ulpStatus) { 9251 9252 /* 9253 * Assume that the port already completed and returned, or 9254 * will return the iocb. Just Log the message. 9255 */ 9256 abort_context = cmdiocb->iocb.un.acxri.abortContextTag; 9257 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag; 9258 9259 spin_lock_irq(&phba->hbalock); 9260 if (phba->sli_rev < LPFC_SLI_REV4) { 9261 if (abort_iotag != 0 && 9262 abort_iotag <= phba->sli.last_iotag) 9263 abort_iocb = 9264 phba->sli.iocbq_lookup[abort_iotag]; 9265 } else 9266 /* For sli4 the abort_tag is the XRI, 9267 * so the abort routine puts the iotag of the iocb 9268 * being aborted in the context field of the abort 9269 * IOCB. 9270 */ 9271 abort_iocb = phba->sli.iocbq_lookup[abort_context]; 9272 9273 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI, 9274 "0327 Cannot abort els iocb %p " 9275 "with tag %x context %x, abort status %x, " 9276 "abort code %x\n", 9277 abort_iocb, abort_iotag, abort_context, 9278 irsp->ulpStatus, irsp->un.ulpWord[4]); 9279 9280 spin_unlock_irq(&phba->hbalock); 9281 } 9282 lpfc_sli_release_iocbq(phba, cmdiocb); 9283 return; 9284 } 9285 9286 /** 9287 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command 9288 * @phba: Pointer to HBA context object. 9289 * @cmdiocb: Pointer to driver command iocb object. 9290 * @rspiocb: Pointer to driver response iocb object. 9291 * 9292 * The function is called from SLI ring event handler with no 9293 * lock held. This function is the completion handler for ELS commands 9294 * which are aborted. The function frees memory resources used for 9295 * the aborted ELS commands. 
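 * CT pass-through commands (CMD_GEN_REQUEST64_CR) are released with
 * lpfc_ct_free_iocb(); all other commands go through lpfc_els_free_iocb().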
9296 **/ 9297 static void 9298 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 9299 struct lpfc_iocbq *rspiocb) 9300 { 9301 IOCB_t *irsp = &rspiocb->iocb; 9302 9303 /* ELS cmd tag <ulpIoTag> completes */ 9304 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 9305 "0139 Ignoring ELS cmd tag x%x completion Data: " 9306 "x%x x%x x%x\n", 9307 irsp->ulpIoTag, irsp->ulpStatus, 9308 irsp->un.ulpWord[4], irsp->ulpTimeout); 9309 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) 9310 lpfc_ct_free_iocb(phba, cmdiocb); 9311 else 9312 lpfc_els_free_iocb(phba, cmdiocb); 9313 return; 9314 } 9315 9316 /** 9317 * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb 9318 * @phba: Pointer to HBA context object. 9319 * @pring: Pointer to driver SLI ring object. 9320 * @cmdiocb: Pointer to driver command iocb object. 9321 * 9322 * This function issues an abort iocb for the provided command iocb down to 9323 * the port. Other than the case the outstanding command iocb is an abort 9324 * request, this function issues abort out unconditionally. This function is 9325 * called with hbalock held. The function returns 0 when it fails due to 9326 * memory allocation failure or when the command iocb is an abort request. 9327 **/ 9328 static int 9329 lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 9330 struct lpfc_iocbq *cmdiocb) 9331 { 9332 struct lpfc_vport *vport = cmdiocb->vport; 9333 struct lpfc_iocbq *abtsiocbp; 9334 IOCB_t *icmd = NULL; 9335 IOCB_t *iabt = NULL; 9336 int retval; 9337 9338 /* 9339 * There are certain command types we don't want to abort. And we 9340 * don't want to abort commands that are already in the process of 9341 * being aborted. 9342 */ 9343 icmd = &cmdiocb->iocb; 9344 if (icmd->ulpCommand == CMD_ABORT_XRI_CN || 9345 icmd->ulpCommand == CMD_CLOSE_XRI_CN || 9346 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0) 9347 return 0; 9348 9349 /* issue ABTS for this IOCB based on iotag */ 9350 abtsiocbp = __lpfc_sli_get_iocbq(phba); 9351 if (abtsiocbp == NULL) 9352 return 0; 9353 9354 /* This signals the response to set the correct status 9355 * before calling the completion handler 9356 */ 9357 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED; 9358 9359 iabt = &abtsiocbp->iocb; 9360 iabt->un.acxri.abortType = ABORT_TYPE_ABTS; 9361 iabt->un.acxri.abortContextTag = icmd->ulpContext; 9362 if (phba->sli_rev == LPFC_SLI_REV4) { 9363 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag; 9364 iabt->un.acxri.abortContextTag = cmdiocb->iotag; 9365 } 9366 else 9367 iabt->un.acxri.abortIoTag = icmd->ulpIoTag; 9368 iabt->ulpLe = 1; 9369 iabt->ulpClass = icmd->ulpClass; 9370 9371 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 9372 abtsiocbp->fcp_wqidx = cmdiocb->fcp_wqidx; 9373 if (cmdiocb->iocb_flag & LPFC_IO_FCP) 9374 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX; 9375 9376 if (phba->link_state >= LPFC_LINK_UP) 9377 iabt->ulpCommand = CMD_ABORT_XRI_CN; 9378 else 9379 iabt->ulpCommand = CMD_CLOSE_XRI_CN; 9380 9381 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl; 9382 9383 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, 9384 "0339 Abort xri x%x, original iotag x%x, " 9385 "abort cmd iotag x%x\n", 9386 iabt->un.acxri.abortIoTag, 9387 iabt->un.acxri.abortContextTag, 9388 abtsiocbp->iotag); 9389 retval = __lpfc_sli_issue_iocb(phba, pring->ringno, abtsiocbp, 0); 9390 9391 if (retval) 9392 __lpfc_sli_release_iocbq(phba, abtsiocbp); 9393 9394 /* 9395 * Caller to this routine should check for IOCB_ERROR 9396 * and handle it properly. 
This routine no longer removes the 9397 * iocb from the txcmplq or calls the completion handler in case of IOCB_ERROR. 9398 */ 9399 return retval; 9400 } 9401 9402 /** 9403 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb 9404 * @phba: Pointer to HBA context object. 9405 * @pring: Pointer to driver SLI ring object. 9406 * @cmdiocb: Pointer to driver command iocb object. 9407 * 9408 * This function issues an abort iocb for the provided command iocb. In case 9409 * of unloading, the abort iocb will not be issued to commands on the ELS 9410 * ring. Instead, the completion callback of those commands is changed 9411 * so that nothing happens when they finish. This function is called with 9412 * hbalock held. The function returns 0 when the command iocb is an abort 9413 * request. 9414 **/ 9415 int 9416 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 9417 struct lpfc_iocbq *cmdiocb) 9418 { 9419 struct lpfc_vport *vport = cmdiocb->vport; 9420 int retval = IOCB_ERROR; 9421 IOCB_t *icmd = NULL; 9422 9423 /* 9424 * There are certain command types we don't want to abort. And we 9425 * don't want to abort commands that are already in the process of 9426 * being aborted. 9427 */ 9428 icmd = &cmdiocb->iocb; 9429 if (icmd->ulpCommand == CMD_ABORT_XRI_CN || 9430 icmd->ulpCommand == CMD_CLOSE_XRI_CN || 9431 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0) 9432 return 0; 9433 9434 /* 9435 * If we're unloading, don't abort iocb on the ELS ring, but change 9436 * the callback so that nothing happens when it finishes. 9437 */ 9438 if ((vport->load_flag & FC_UNLOADING) && 9439 (pring->ringno == LPFC_ELS_RING)) { 9440 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) 9441 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl; 9442 else 9443 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl; 9444 goto abort_iotag_exit; 9445 } 9446 9447 /* Now, we try to issue the abort to the cmdiocb out */ 9448 retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb); 9449 9450 abort_iotag_exit: 9451 /* 9452 * Caller to this routine should check for IOCB_ERROR 9453 * and handle it properly. This routine no longer removes the 9454 * iocb from the txcmplq or calls the completion handler in case of IOCB_ERROR. 9455 */ 9456 return retval; 9457 } 9458 9459 /** 9460 * lpfc_sli_iocb_ring_abort - Unconditionally abort all iocbs on an iocb ring 9461 * @phba: Pointer to HBA context object. 9462 * @pring: Pointer to driver SLI ring object. 9463 * 9464 * This function aborts all iocbs in the given ring and frees all the iocb 9465 * objects in txq. This function issues abort iocbs unconditionally for all 9466 * the iocb commands in txcmplq. The iocbs in the txcmplq are not guaranteed 9467 * to complete before the return of this function. The caller is not required 9468 * to hold any locks.
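 * lpfc_sli_hba_iocb_abort() below simply applies this routine to every
 * ring on the HBA.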
9469 **/ 9470 static void 9471 lpfc_sli_iocb_ring_abort(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 9472 { 9473 LIST_HEAD(completions); 9474 struct lpfc_iocbq *iocb, *next_iocb; 9475 9476 if (pring->ringno == LPFC_ELS_RING) 9477 lpfc_fabric_abort_hba(phba); 9478 9479 spin_lock_irq(&phba->hbalock); 9480 9481 /* Take off all the iocbs on txq for cancelling */ 9482 list_splice_init(&pring->txq, &completions); 9483 pring->txq_cnt = 0; 9484 9485 /* Next issue ABTS for everything on the txcmplq */ 9486 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) 9487 lpfc_sli_abort_iotag_issue(phba, pring, iocb); 9488 9489 spin_unlock_irq(&phba->hbalock); 9490 9491 /* Cancel all the IOCBs from the completions list */ 9492 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 9493 IOERR_SLI_ABORTED); 9494 } 9495 9496 /** 9497 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba. 9498 * @phba: pointer to lpfc HBA data structure. 9499 * 9500 * This routine will abort all pending and outstanding iocbs to an HBA. 9501 **/ 9502 void 9503 lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba) 9504 { 9505 struct lpfc_sli *psli = &phba->sli; 9506 struct lpfc_sli_ring *pring; 9507 int i; 9508 9509 for (i = 0; i < psli->num_rings; i++) { 9510 pring = &psli->ring[i]; 9511 lpfc_sli_iocb_ring_abort(phba, pring); 9512 } 9513 } 9514 9515 /** 9516 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN 9517 * @iocbq: Pointer to driver iocb object. 9518 * @vport: Pointer to driver virtual port object. 9519 * @tgt_id: SCSI ID of the target. 9520 * @lun_id: LUN ID of the scsi device. 9521 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST 9522 * 9523 * This function acts as an iocb filter for functions which abort or count 9524 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return 9525 * 0 if the filtering criteria is met for the given iocb and will return 9526 * 1 if the filtering criteria is not met. 9527 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the 9528 * given iocb is for the SCSI device specified by vport, tgt_id and 9529 * lun_id parameter. 9530 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the 9531 * given iocb is for the SCSI target specified by vport and tgt_id 9532 * parameters. 9533 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the 9534 * given iocb is for the SCSI host associated with the given vport. 9535 * This function is called with no locks held. 
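 *
 * For example (an illustrative sketch, not a real call site), counting the
 * FCP commands still outstanding against a single LUN could look like:
 *
 *	cnt = lpfc_sli_sum_iocb(vport, ndlp->nlp_sid, lun_id, LPFC_CTX_LUN);
 *
 * whereas LPFC_CTX_HOST counts every FCP command on the vport and ignores
 * the tgt_id and lun_id arguments.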
9536 **/ 9537 static int 9538 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport, 9539 uint16_t tgt_id, uint64_t lun_id, 9540 lpfc_ctx_cmd ctx_cmd) 9541 { 9542 struct lpfc_scsi_buf *lpfc_cmd; 9543 int rc = 1; 9544 9545 if (!(iocbq->iocb_flag & LPFC_IO_FCP)) 9546 return rc; 9547 9548 if (iocbq->vport != vport) 9549 return rc; 9550 9551 lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq); 9552 9553 if (lpfc_cmd->pCmd == NULL) 9554 return rc; 9555 9556 switch (ctx_cmd) { 9557 case LPFC_CTX_LUN: 9558 if ((lpfc_cmd->rdata->pnode) && 9559 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) && 9560 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id)) 9561 rc = 0; 9562 break; 9563 case LPFC_CTX_TGT: 9564 if ((lpfc_cmd->rdata->pnode) && 9565 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id)) 9566 rc = 0; 9567 break; 9568 case LPFC_CTX_HOST: 9569 rc = 0; 9570 break; 9571 default: 9572 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n", 9573 __func__, ctx_cmd); 9574 break; 9575 } 9576 9577 return rc; 9578 } 9579 9580 /** 9581 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending 9582 * @vport: Pointer to virtual port. 9583 * @tgt_id: SCSI ID of the target. 9584 * @lun_id: LUN ID of the scsi device. 9585 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. 9586 * 9587 * This function returns number of FCP commands pending for the vport. 9588 * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP 9589 * commands pending on the vport associated with SCSI device specified 9590 * by tgt_id and lun_id parameters. 9591 * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP 9592 * commands pending on the vport associated with SCSI target specified 9593 * by tgt_id parameter. 9594 * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP 9595 * commands pending on the vport. 9596 * This function returns the number of iocbs which satisfy the filter. 9597 * This function is called without any lock held. 9598 **/ 9599 int 9600 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id, 9601 lpfc_ctx_cmd ctx_cmd) 9602 { 9603 struct lpfc_hba *phba = vport->phba; 9604 struct lpfc_iocbq *iocbq; 9605 int sum, i; 9606 9607 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) { 9608 iocbq = phba->sli.iocbq_lookup[i]; 9609 9610 if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id, 9611 ctx_cmd) == 0) 9612 sum++; 9613 } 9614 9615 return sum; 9616 } 9617 9618 /** 9619 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs 9620 * @phba: Pointer to HBA context object 9621 * @cmdiocb: Pointer to command iocb object. 9622 * @rspiocb: Pointer to response iocb object. 9623 * 9624 * This function is called when an aborted FCP iocb completes. This 9625 * function is called by the ring event handler with no lock held. 9626 * This function frees the iocb. 
9627 **/ 9628 void 9629 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 9630 struct lpfc_iocbq *rspiocb) 9631 { 9632 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 9633 "3096 ABORT_XRI_CN completing on xri x%x " 9634 "original iotag x%x, abort cmd iotag x%x " 9635 "status 0x%x, reason 0x%x\n", 9636 cmdiocb->iocb.un.acxri.abortContextTag, 9637 cmdiocb->iocb.un.acxri.abortIoTag, 9638 cmdiocb->iotag, rspiocb->iocb.ulpStatus, 9639 rspiocb->iocb.un.ulpWord[4]); 9640 lpfc_sli_release_iocbq(phba, cmdiocb); 9641 return; 9642 } 9643 9644 /** 9645 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN 9646 * @vport: Pointer to virtual port. 9647 * @pring: Pointer to driver SLI ring object. 9648 * @tgt_id: SCSI ID of the target. 9649 * @lun_id: LUN ID of the scsi device. 9650 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. 9651 * 9652 * This function sends an abort command for every SCSI command 9653 * associated with the given virtual port pending on the ring 9654 * filtered by lpfc_sli_validate_fcp_iocb function. 9655 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the 9656 * FCP iocbs associated with lun specified by tgt_id and lun_id 9657 * parameters 9658 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the 9659 * FCP iocbs associated with SCSI target specified by tgt_id parameter. 9660 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all 9661 * FCP iocbs associated with virtual port. 9662 * This function returns number of iocbs it failed to abort. 9663 * This function is called with no locks held. 9664 **/ 9665 int 9666 lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, 9667 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd) 9668 { 9669 struct lpfc_hba *phba = vport->phba; 9670 struct lpfc_iocbq *iocbq; 9671 struct lpfc_iocbq *abtsiocb; 9672 IOCB_t *cmd = NULL; 9673 int errcnt = 0, ret_val = 0; 9674 int i; 9675 9676 for (i = 1; i <= phba->sli.last_iotag; i++) { 9677 iocbq = phba->sli.iocbq_lookup[i]; 9678 9679 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id, 9680 abort_cmd) != 0) 9681 continue; 9682 9683 /* issue ABTS for this IOCB based on iotag */ 9684 abtsiocb = lpfc_sli_get_iocbq(phba); 9685 if (abtsiocb == NULL) { 9686 errcnt++; 9687 continue; 9688 } 9689 9690 cmd = &iocbq->iocb; 9691 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; 9692 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext; 9693 if (phba->sli_rev == LPFC_SLI_REV4) 9694 abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag; 9695 else 9696 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag; 9697 abtsiocb->iocb.ulpLe = 1; 9698 abtsiocb->iocb.ulpClass = cmd->ulpClass; 9699 abtsiocb->vport = phba->pport; 9700 9701 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 9702 abtsiocb->fcp_wqidx = iocbq->fcp_wqidx; 9703 if (iocbq->iocb_flag & LPFC_IO_FCP) 9704 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX; 9705 9706 if (lpfc_is_link_up(phba)) 9707 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN; 9708 else 9709 abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN; 9710 9711 /* Setup callback routine and issue the command. 
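 * The lpfc_sli_abort_fcp_cmpl handler set up here only logs the ABTS
 * completion and releases the abort iocb; the aborted command itself is
 * expected to come back through its own completion path.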
*/ 9712 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 9713 ret_val = lpfc_sli_issue_iocb(phba, pring->ringno, 9714 abtsiocb, 0); 9715 if (ret_val == IOCB_ERROR) { 9716 lpfc_sli_release_iocbq(phba, abtsiocb); 9717 errcnt++; 9718 continue; 9719 } 9720 } 9721 9722 return errcnt; 9723 } 9724 9725 /** 9726 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler 9727 * @phba: Pointer to HBA context object. 9728 * @cmdiocbq: Pointer to command iocb. 9729 * @rspiocbq: Pointer to response iocb. 9730 * 9731 * This function is the completion handler for iocbs issued using 9732 * lpfc_sli_issue_iocb_wait function. This function is called by the 9733 * ring event handler function without any lock held. This function 9734 * can be called from both worker thread context and interrupt 9735 * context. This function also can be called from other thread which 9736 * cleans up the SLI layer objects. 9737 * This function copy the contents of the response iocb to the 9738 * response iocb memory object provided by the caller of 9739 * lpfc_sli_issue_iocb_wait and then wakes up the thread which 9740 * sleeps for the iocb completion. 9741 **/ 9742 static void 9743 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, 9744 struct lpfc_iocbq *cmdiocbq, 9745 struct lpfc_iocbq *rspiocbq) 9746 { 9747 wait_queue_head_t *pdone_q; 9748 unsigned long iflags; 9749 struct lpfc_scsi_buf *lpfc_cmd; 9750 9751 spin_lock_irqsave(&phba->hbalock, iflags); 9752 cmdiocbq->iocb_flag |= LPFC_IO_WAKE; 9753 if (cmdiocbq->context2 && rspiocbq) 9754 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, 9755 &rspiocbq->iocb, sizeof(IOCB_t)); 9756 9757 /* Set the exchange busy flag for task management commands */ 9758 if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) && 9759 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) { 9760 lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf, 9761 cur_iocbq); 9762 lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY; 9763 } 9764 9765 pdone_q = cmdiocbq->context_un.wait_queue; 9766 if (pdone_q) 9767 wake_up(pdone_q); 9768 spin_unlock_irqrestore(&phba->hbalock, iflags); 9769 return; 9770 } 9771 9772 /** 9773 * lpfc_chk_iocb_flg - Test IOCB flag with lock held. 9774 * @phba: Pointer to HBA context object.. 9775 * @piocbq: Pointer to command iocb. 9776 * @flag: Flag to test. 9777 * 9778 * This routine grabs the hbalock and then test the iocb_flag to 9779 * see if the passed in flag is set. 9780 * Returns: 9781 * 1 if flag is set. 9782 * 0 if flag is not set. 9783 **/ 9784 static int 9785 lpfc_chk_iocb_flg(struct lpfc_hba *phba, 9786 struct lpfc_iocbq *piocbq, uint32_t flag) 9787 { 9788 unsigned long iflags; 9789 int ret; 9790 9791 spin_lock_irqsave(&phba->hbalock, iflags); 9792 ret = piocbq->iocb_flag & flag; 9793 spin_unlock_irqrestore(&phba->hbalock, iflags); 9794 return ret; 9795 9796 } 9797 9798 /** 9799 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands 9800 * @phba: Pointer to HBA context object.. 9801 * @pring: Pointer to sli ring. 9802 * @piocb: Pointer to command iocb. 9803 * @prspiocbq: Pointer to response iocb. 9804 * @timeout: Timeout in number of seconds. 9805 * 9806 * This function issues the iocb to firmware and waits for the 9807 * iocb to complete. If the iocb command is not 9808 * completed within timeout seconds, it returns IOCB_TIMEDOUT. 9809 * Caller should not free the iocb resources if this function 9810 * returns IOCB_TIMEDOUT. 9811 * The function waits for the iocb completion using an 9812 * non-interruptible wait. 
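 * A hypothetical caller, shown only to illustrate the calling convention
 * (piocb, prsp and timeout are placeholders; both iocbs would normally
 * come from lpfc_sli_get_iocbq()):
 *
 *	rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, piocb, prsp,
 *				      timeout);
 *	if (rc == IOCB_TIMEDOUT)
 *		return rc;
 *
 * On IOCB_TIMEDOUT the caller must not free piocb, as noted above.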
9813 * This function will sleep while waiting for iocb completion. 9814 * So, this function should not be called from any context which 9815 * does not allow sleeping. Due to the same reason, this function 9816 * cannot be called with interrupt disabled. 9817 * This function assumes that the iocb completions occur while 9818 * this function sleep. So, this function cannot be called from 9819 * the thread which process iocb completion for this ring. 9820 * This function clears the iocb_flag of the iocb object before 9821 * issuing the iocb and the iocb completion handler sets this 9822 * flag and wakes this thread when the iocb completes. 9823 * The contents of the response iocb will be copied to prspiocbq 9824 * by the completion handler when the command completes. 9825 * This function returns IOCB_SUCCESS when success. 9826 * This function is called with no lock held. 9827 **/ 9828 int 9829 lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, 9830 uint32_t ring_number, 9831 struct lpfc_iocbq *piocb, 9832 struct lpfc_iocbq *prspiocbq, 9833 uint32_t timeout) 9834 { 9835 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q); 9836 long timeleft, timeout_req = 0; 9837 int retval = IOCB_SUCCESS; 9838 uint32_t creg_val; 9839 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 9840 /* 9841 * If the caller has provided a response iocbq buffer, then context2 9842 * is NULL or its an error. 9843 */ 9844 if (prspiocbq) { 9845 if (piocb->context2) 9846 return IOCB_ERROR; 9847 piocb->context2 = prspiocbq; 9848 } 9849 9850 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait; 9851 piocb->context_un.wait_queue = &done_q; 9852 piocb->iocb_flag &= ~LPFC_IO_WAKE; 9853 9854 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 9855 if (lpfc_readl(phba->HCregaddr, &creg_val)) 9856 return IOCB_ERROR; 9857 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); 9858 writel(creg_val, phba->HCregaddr); 9859 readl(phba->HCregaddr); /* flush */ 9860 } 9861 9862 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb, 9863 SLI_IOCB_RET_IOCB); 9864 if (retval == IOCB_SUCCESS) { 9865 timeout_req = timeout * HZ; 9866 timeleft = wait_event_timeout(done_q, 9867 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE), 9868 timeout_req); 9869 9870 if (piocb->iocb_flag & LPFC_IO_WAKE) { 9871 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 9872 "0331 IOCB wake signaled\n"); 9873 } else if (timeleft == 0) { 9874 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9875 "0338 IOCB wait timeout error - no " 9876 "wake response Data x%x\n", timeout); 9877 retval = IOCB_TIMEDOUT; 9878 } else { 9879 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9880 "0330 IOCB wake NOT set, " 9881 "Data x%x x%lx\n", 9882 timeout, (timeleft / jiffies)); 9883 retval = IOCB_TIMEDOUT; 9884 } 9885 } else if (retval == IOCB_BUSY) { 9886 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 9887 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n", 9888 phba->iocb_cnt, pring->txq_cnt, pring->txcmplq_cnt); 9889 return retval; 9890 } else { 9891 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 9892 "0332 IOCB wait issue failed, Data x%x\n", 9893 retval); 9894 retval = IOCB_ERROR; 9895 } 9896 9897 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 9898 if (lpfc_readl(phba->HCregaddr, &creg_val)) 9899 return IOCB_ERROR; 9900 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING); 9901 writel(creg_val, phba->HCregaddr); 9902 readl(phba->HCregaddr); /* flush */ 9903 } 9904 9905 if (prspiocbq) 9906 piocb->context2 = NULL; 9907 9908 piocb->context_un.wait_queue = NULL; 9909 piocb->iocb_cmpl = NULL; 9910 return retval; 9911 } 9912 9913 /** 9914 * lpfc_sli_issue_mbox_wait - 
Synchronous function to issue a mailbox command 9915 * @phba: Pointer to HBA context object. 9916 * @pmboxq: Pointer to driver mailbox object. 9917 * @timeout: Timeout in number of seconds. 9918 * 9919 * This function issues the mailbox to firmware and waits for the 9920 * mailbox command to complete. If the mailbox command is not 9921 * completed within timeout seconds, it returns MBX_TIMEOUT. 9922 * The function waits for the mailbox completion using an 9923 * interruptible wait. If the thread is woken up due to a 9924 * signal, MBX_TIMEOUT error is returned to the caller. The caller 9925 * should not free the mailbox resources if this function returns 9926 * MBX_TIMEOUT. 9927 * This function will sleep while waiting for mailbox completion. 9928 * So, this function should not be called from any context which 9929 * does not allow sleeping. Due to the same reason, this function 9930 * cannot be called with interrupts disabled. 9931 * This function assumes that the mailbox completion occurs while 9932 * this function sleeps. So, this function cannot be called from 9933 * the worker thread which processes mailbox completion. 9934 * This function is called in the context of HBA management 9935 * applications. 9936 * This function returns MBX_SUCCESS when successful. 9937 * This function is called with no lock held. 9938 **/ 9939 int 9940 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq, 9941 uint32_t timeout) 9942 { 9943 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q); 9944 int retval; 9945 unsigned long flag; 9946 9947 /* The caller must leave context1 empty. */ 9948 if (pmboxq->context1) 9949 return MBX_NOT_FINISHED; 9950 9951 pmboxq->mbox_flag &= ~LPFC_MBX_WAKE; 9952 /* setup wake call as IOCB callback */ 9953 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait; 9954 /* setup context field to pass wait_queue pointer to wake function */ 9955 pmboxq->context1 = &done_q; 9956 9957 /* now issue the command */ 9958 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); 9959 if (retval == MBX_BUSY || retval == MBX_SUCCESS) { 9960 wait_event_interruptible_timeout(done_q, 9961 pmboxq->mbox_flag & LPFC_MBX_WAKE, 9962 timeout * HZ); 9963 9964 spin_lock_irqsave(&phba->hbalock, flag); 9965 pmboxq->context1 = NULL; 9966 /* 9967 * if LPFC_MBX_WAKE flag is set the mailbox is completed 9968 * else do not free the resources. 9969 */ 9970 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) { 9971 retval = MBX_SUCCESS; 9972 lpfc_sli4_swap_str(phba, pmboxq); 9973 } else { 9974 retval = MBX_TIMEOUT; 9975 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 9976 } 9977 spin_unlock_irqrestore(&phba->hbalock, flag); 9978 } 9979 9980 return retval; 9981 } 9982 9983 /** 9984 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system 9985 * @phba: Pointer to HBA context. 9986 * 9987 * This function is called to shutdown the driver's mailbox sub-system. 9988 * It first marks the mailbox sub-system as blocked to prevent 9989 * asynchronous mailbox commands from being issued off the pending mailbox 9990 * command queue. If the mailbox command sub-system shutdown is due to 9991 * HBA error conditions such as EEH or ERATT, this routine shall invoke 9992 * the mailbox sub-system flush routine to forcefully bring down the 9993 * mailbox sub-system. Otherwise, if it is due to a normal condition (such 9994 * as with offline or HBA function reset), this routine will wait for the 9995 * outstanding mailbox command to complete before invoking the mailbox 9996 * sub-system flush routine to gracefully bring down the mailbox sub-system.
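 *
 * Illustrative call sites (a sketch based on the callers visible in this
 * file): a normal teardown such as lpfc_sli_hba_down() passes LPFC_MBX_WAIT
 * so an active mailbox command can finish, while EEH/error paths would pass
 * LPFC_MBX_NO_WAIT to flush immediately:
 *
 *	lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);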
9997 **/ 9998 void 9999 lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action) 10000 { 10001 struct lpfc_sli *psli = &phba->sli; 10002 unsigned long timeout; 10003 10004 if (mbx_action == LPFC_MBX_NO_WAIT) { 10005 /* delay 100ms for port state */ 10006 msleep(100); 10007 lpfc_sli_mbox_sys_flush(phba); 10008 return; 10009 } 10010 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies; 10011 10012 spin_lock_irq(&phba->hbalock); 10013 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 10014 10015 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 10016 /* Determine how long we might wait for the active mailbox 10017 * command to be gracefully completed by firmware. 10018 */ 10019 if (phba->sli.mbox_active) 10020 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, 10021 phba->sli.mbox_active) * 10022 1000) + jiffies; 10023 spin_unlock_irq(&phba->hbalock); 10024 10025 while (phba->sli.mbox_active) { 10026 /* Check active mailbox complete status every 2ms */ 10027 msleep(2); 10028 if (time_after(jiffies, timeout)) 10029 /* Timeout, let the mailbox flush routine to 10030 * forcefully release active mailbox command 10031 */ 10032 break; 10033 } 10034 } else 10035 spin_unlock_irq(&phba->hbalock); 10036 10037 lpfc_sli_mbox_sys_flush(phba); 10038 } 10039 10040 /** 10041 * lpfc_sli_eratt_read - read sli-3 error attention events 10042 * @phba: Pointer to HBA context. 10043 * 10044 * This function is called to read the SLI3 device error attention registers 10045 * for possible error attention events. The caller must hold the hostlock 10046 * with spin_lock_irq(). 10047 * 10048 * This function returns 1 when there is Error Attention in the Host Attention 10049 * Register and returns 0 otherwise. 10050 **/ 10051 static int 10052 lpfc_sli_eratt_read(struct lpfc_hba *phba) 10053 { 10054 uint32_t ha_copy; 10055 10056 /* Read chip Host Attention (HA) register */ 10057 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 10058 goto unplug_err; 10059 10060 if (ha_copy & HA_ERATT) { 10061 /* Read host status register to retrieve error event */ 10062 if (lpfc_sli_read_hs(phba)) 10063 goto unplug_err; 10064 10065 /* Check if there is a deferred error condition is active */ 10066 if ((HS_FFER1 & phba->work_hs) && 10067 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | 10068 HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) { 10069 phba->hba_flag |= DEFER_ERATT; 10070 /* Clear all interrupt enable conditions */ 10071 writel(0, phba->HCregaddr); 10072 readl(phba->HCregaddr); 10073 } 10074 10075 /* Set the driver HA work bitmap */ 10076 phba->work_ha |= HA_ERATT; 10077 /* Indicate polling handles this ERATT */ 10078 phba->hba_flag |= HBA_ERATT_HANDLED; 10079 return 1; 10080 } 10081 return 0; 10082 10083 unplug_err: 10084 /* Set the driver HS work bitmap */ 10085 phba->work_hs |= UNPLUG_ERR; 10086 /* Set the driver HA work bitmap */ 10087 phba->work_ha |= HA_ERATT; 10088 /* Indicate polling handles this ERATT */ 10089 phba->hba_flag |= HBA_ERATT_HANDLED; 10090 return 1; 10091 } 10092 10093 /** 10094 * lpfc_sli4_eratt_read - read sli-4 error attention events 10095 * @phba: Pointer to HBA context. 10096 * 10097 * This function is called to read the SLI4 device error attention registers 10098 * for possible error attention events. The caller must hold the hostlock 10099 * with spin_lock_irq(). 10100 * 10101 * This function returns 1 when there is Error Attention in the Host Attention 10102 * Register and returns 0 otherwise. 
10103 **/ 10104 static int 10105 lpfc_sli4_eratt_read(struct lpfc_hba *phba) 10106 { 10107 uint32_t uerr_sta_hi, uerr_sta_lo; 10108 uint32_t if_type, portsmphr; 10109 struct lpfc_register portstat_reg; 10110 10111 /* 10112 * For now, use the SLI4 device internal unrecoverable error 10113 * registers for error attention. This can be changed later. 10114 */ 10115 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 10116 switch (if_type) { 10117 case LPFC_SLI_INTF_IF_TYPE_0: 10118 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr, 10119 &uerr_sta_lo) || 10120 lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr, 10121 &uerr_sta_hi)) { 10122 phba->work_hs |= UNPLUG_ERR; 10123 phba->work_ha |= HA_ERATT; 10124 phba->hba_flag |= HBA_ERATT_HANDLED; 10125 return 1; 10126 } 10127 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) || 10128 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) { 10129 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10130 "1423 HBA Unrecoverable error: " 10131 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " 10132 "ue_mask_lo_reg=0x%x, " 10133 "ue_mask_hi_reg=0x%x\n", 10134 uerr_sta_lo, uerr_sta_hi, 10135 phba->sli4_hba.ue_mask_lo, 10136 phba->sli4_hba.ue_mask_hi); 10137 phba->work_status[0] = uerr_sta_lo; 10138 phba->work_status[1] = uerr_sta_hi; 10139 phba->work_ha |= HA_ERATT; 10140 phba->hba_flag |= HBA_ERATT_HANDLED; 10141 return 1; 10142 } 10143 break; 10144 case LPFC_SLI_INTF_IF_TYPE_2: 10145 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, 10146 &portstat_reg.word0) || 10147 lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 10148 &portsmphr)){ 10149 phba->work_hs |= UNPLUG_ERR; 10150 phba->work_ha |= HA_ERATT; 10151 phba->hba_flag |= HBA_ERATT_HANDLED; 10152 return 1; 10153 } 10154 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) { 10155 phba->work_status[0] = 10156 readl(phba->sli4_hba.u.if_type2.ERR1regaddr); 10157 phba->work_status[1] = 10158 readl(phba->sli4_hba.u.if_type2.ERR2regaddr); 10159 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10160 "2885 Port Status Event: " 10161 "port status reg 0x%x, " 10162 "port smphr reg 0x%x, " 10163 "error 1=0x%x, error 2=0x%x\n", 10164 portstat_reg.word0, 10165 portsmphr, 10166 phba->work_status[0], 10167 phba->work_status[1]); 10168 phba->work_ha |= HA_ERATT; 10169 phba->hba_flag |= HBA_ERATT_HANDLED; 10170 return 1; 10171 } 10172 break; 10173 case LPFC_SLI_INTF_IF_TYPE_1: 10174 default: 10175 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10176 "2886 HBA Error Attention on unsupported " 10177 "if type %d.", if_type); 10178 return 1; 10179 } 10180 10181 return 0; 10182 } 10183 10184 /** 10185 * lpfc_sli_check_eratt - check error attention events 10186 * @phba: Pointer to HBA context. 10187 * 10188 * This function is called from timer soft interrupt context to check HBA's 10189 * error attention register bit for error attention events. 10190 * 10191 * This function returns 1 when there is Error Attention in the Host Attention 10192 * Register and returns 0 otherwise. 10193 **/ 10194 int 10195 lpfc_sli_check_eratt(struct lpfc_hba *phba) 10196 { 10197 uint32_t ha_copy; 10198 10199 /* If somebody is waiting to handle an eratt, don't process it 10200 * here. The brdkill function will do this. 
10201 */ 10202 if (phba->link_flag & LS_IGNORE_ERATT) 10203 return 0; 10204 10205 /* Check if interrupt handler handles this ERATT */ 10206 spin_lock_irq(&phba->hbalock); 10207 if (phba->hba_flag & HBA_ERATT_HANDLED) { 10208 /* Interrupt handler has handled ERATT */ 10209 spin_unlock_irq(&phba->hbalock); 10210 return 0; 10211 } 10212 10213 /* 10214 * If there is deferred error attention, do not check for error 10215 * attention 10216 */ 10217 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 10218 spin_unlock_irq(&phba->hbalock); 10219 return 0; 10220 } 10221 10222 /* If PCI channel is offline, don't process it */ 10223 if (unlikely(pci_channel_offline(phba->pcidev))) { 10224 spin_unlock_irq(&phba->hbalock); 10225 return 0; 10226 } 10227 10228 switch (phba->sli_rev) { 10229 case LPFC_SLI_REV2: 10230 case LPFC_SLI_REV3: 10231 /* Read chip Host Attention (HA) register */ 10232 ha_copy = lpfc_sli_eratt_read(phba); 10233 break; 10234 case LPFC_SLI_REV4: 10235 /* Read device Uncoverable Error (UERR) registers */ 10236 ha_copy = lpfc_sli4_eratt_read(phba); 10237 break; 10238 default: 10239 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10240 "0299 Invalid SLI revision (%d)\n", 10241 phba->sli_rev); 10242 ha_copy = 0; 10243 break; 10244 } 10245 spin_unlock_irq(&phba->hbalock); 10246 10247 return ha_copy; 10248 } 10249 10250 /** 10251 * lpfc_intr_state_check - Check device state for interrupt handling 10252 * @phba: Pointer to HBA context. 10253 * 10254 * This inline routine checks whether a device or its PCI slot is in a state 10255 * that the interrupt should be handled. 10256 * 10257 * This function returns 0 if the device or the PCI slot is in a state that 10258 * interrupt should be handled, otherwise -EIO. 10259 */ 10260 static inline int 10261 lpfc_intr_state_check(struct lpfc_hba *phba) 10262 { 10263 /* If the pci channel is offline, ignore all the interrupts */ 10264 if (unlikely(pci_channel_offline(phba->pcidev))) 10265 return -EIO; 10266 10267 /* Update device level interrupt statistics */ 10268 phba->sli.slistat.sli_intr++; 10269 10270 /* Ignore all interrupts during initialization. */ 10271 if (unlikely(phba->link_state < LPFC_LINK_DOWN)) 10272 return -EIO; 10273 10274 return 0; 10275 } 10276 10277 /** 10278 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device 10279 * @irq: Interrupt number. 10280 * @dev_id: The device context pointer. 10281 * 10282 * This function is directly called from the PCI layer as an interrupt 10283 * service routine when device with SLI-3 interface spec is enabled with 10284 * MSI-X multi-message interrupt mode and there are slow-path events in 10285 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ 10286 * interrupt mode, this function is called as part of the device-level 10287 * interrupt handler. When the PCI slot is in error recovery or the HBA 10288 * is undergoing initialization, the interrupt handler will not process 10289 * the interrupt. The link attention and ELS ring attention events are 10290 * handled by the worker thread. The interrupt handler signals the worker 10291 * thread and returns for these events. This function is called without 10292 * any lock held. It gets the hbalock to access and update SLI data 10293 * structures. 10294 * 10295 * This function returns IRQ_HANDLED when interrupt is handled else it 10296 * returns IRQ_NONE. 
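 *
 * Illustrative sketch only, not the actual attach code: an MSI-X probe path
 * would hook this handler up with something along the lines of
 *
 *	rc = request_irq(vector, lpfc_sli_sp_intr_handler, 0,
 *			 "lpfc-sp", phba);
 *
 * where vector and the "lpfc-sp" name are placeholders; the real
 * registration is done in the driver's init code.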
10297 **/ 10298 irqreturn_t 10299 lpfc_sli_sp_intr_handler(int irq, void *dev_id) 10300 { 10301 struct lpfc_hba *phba; 10302 uint32_t ha_copy, hc_copy; 10303 uint32_t work_ha_copy; 10304 unsigned long status; 10305 unsigned long iflag; 10306 uint32_t control; 10307 10308 MAILBOX_t *mbox, *pmbox; 10309 struct lpfc_vport *vport; 10310 struct lpfc_nodelist *ndlp; 10311 struct lpfc_dmabuf *mp; 10312 LPFC_MBOXQ_t *pmb; 10313 int rc; 10314 10315 /* 10316 * Get the driver's phba structure from the dev_id and 10317 * assume the HBA is not interrupting. 10318 */ 10319 phba = (struct lpfc_hba *)dev_id; 10320 10321 if (unlikely(!phba)) 10322 return IRQ_NONE; 10323 10324 /* 10325 * Stuff needs to be attented to when this function is invoked as an 10326 * individual interrupt handler in MSI-X multi-message interrupt mode 10327 */ 10328 if (phba->intr_type == MSIX) { 10329 /* Check device state for handling interrupt */ 10330 if (lpfc_intr_state_check(phba)) 10331 return IRQ_NONE; 10332 /* Need to read HA REG for slow-path events */ 10333 spin_lock_irqsave(&phba->hbalock, iflag); 10334 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 10335 goto unplug_error; 10336 /* If somebody is waiting to handle an eratt don't process it 10337 * here. The brdkill function will do this. 10338 */ 10339 if (phba->link_flag & LS_IGNORE_ERATT) 10340 ha_copy &= ~HA_ERATT; 10341 /* Check the need for handling ERATT in interrupt handler */ 10342 if (ha_copy & HA_ERATT) { 10343 if (phba->hba_flag & HBA_ERATT_HANDLED) 10344 /* ERATT polling has handled ERATT */ 10345 ha_copy &= ~HA_ERATT; 10346 else 10347 /* Indicate interrupt handler handles ERATT */ 10348 phba->hba_flag |= HBA_ERATT_HANDLED; 10349 } 10350 10351 /* 10352 * If there is deferred error attention, do not check for any 10353 * interrupt. 10354 */ 10355 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 10356 spin_unlock_irqrestore(&phba->hbalock, iflag); 10357 return IRQ_NONE; 10358 } 10359 10360 /* Clear up only attention source related to slow-path */ 10361 if (lpfc_readl(phba->HCregaddr, &hc_copy)) 10362 goto unplug_error; 10363 10364 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA | 10365 HC_LAINT_ENA | HC_ERINT_ENA), 10366 phba->HCregaddr); 10367 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)), 10368 phba->HAregaddr); 10369 writel(hc_copy, phba->HCregaddr); 10370 readl(phba->HAregaddr); /* flush */ 10371 spin_unlock_irqrestore(&phba->hbalock, iflag); 10372 } else 10373 ha_copy = phba->ha_copy; 10374 10375 work_ha_copy = ha_copy & phba->work_ha_mask; 10376 10377 if (work_ha_copy) { 10378 if (work_ha_copy & HA_LATT) { 10379 if (phba->sli.sli_flag & LPFC_PROCESS_LA) { 10380 /* 10381 * Turn off Link Attention interrupts 10382 * until CLEAR_LA done 10383 */ 10384 spin_lock_irqsave(&phba->hbalock, iflag); 10385 phba->sli.sli_flag &= ~LPFC_PROCESS_LA; 10386 if (lpfc_readl(phba->HCregaddr, &control)) 10387 goto unplug_error; 10388 control &= ~HC_LAINT_ENA; 10389 writel(control, phba->HCregaddr); 10390 readl(phba->HCregaddr); /* flush */ 10391 spin_unlock_irqrestore(&phba->hbalock, iflag); 10392 } 10393 else 10394 work_ha_copy &= ~HA_LATT; 10395 } 10396 10397 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) { 10398 /* 10399 * Turn off Slow Rings interrupts, LPFC_ELS_RING is 10400 * the only slow ring. 
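 * Each ring owns a 4-bit attention field in the HA register, so the ELS
 * ring status is isolated below by masking with HA_RXMASK shifted by
 * (4 * LPFC_ELS_RING).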
10401 */ 10402 status = (work_ha_copy & 10403 (HA_RXMASK << (4*LPFC_ELS_RING))); 10404 status >>= (4*LPFC_ELS_RING); 10405 if (status & HA_RXMASK) { 10406 spin_lock_irqsave(&phba->hbalock, iflag); 10407 if (lpfc_readl(phba->HCregaddr, &control)) 10408 goto unplug_error; 10409 10410 lpfc_debugfs_slow_ring_trc(phba, 10411 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x", 10412 control, status, 10413 (uint32_t)phba->sli.slistat.sli_intr); 10414 10415 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) { 10416 lpfc_debugfs_slow_ring_trc(phba, 10417 "ISR Disable ring:" 10418 "pwork:x%x hawork:x%x wait:x%x", 10419 phba->work_ha, work_ha_copy, 10420 (uint32_t)((unsigned long) 10421 &phba->work_waitq)); 10422 10423 control &= 10424 ~(HC_R0INT_ENA << LPFC_ELS_RING); 10425 writel(control, phba->HCregaddr); 10426 readl(phba->HCregaddr); /* flush */ 10427 } 10428 else { 10429 lpfc_debugfs_slow_ring_trc(phba, 10430 "ISR slow ring: pwork:" 10431 "x%x hawork:x%x wait:x%x", 10432 phba->work_ha, work_ha_copy, 10433 (uint32_t)((unsigned long) 10434 &phba->work_waitq)); 10435 } 10436 spin_unlock_irqrestore(&phba->hbalock, iflag); 10437 } 10438 } 10439 spin_lock_irqsave(&phba->hbalock, iflag); 10440 if (work_ha_copy & HA_ERATT) { 10441 if (lpfc_sli_read_hs(phba)) 10442 goto unplug_error; 10443 /* 10444 * Check if there is a deferred error condition 10445 * is active 10446 */ 10447 if ((HS_FFER1 & phba->work_hs) && 10448 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | 10449 HS_FFER6 | HS_FFER7 | HS_FFER8) & 10450 phba->work_hs)) { 10451 phba->hba_flag |= DEFER_ERATT; 10452 /* Clear all interrupt enable conditions */ 10453 writel(0, phba->HCregaddr); 10454 readl(phba->HCregaddr); 10455 } 10456 } 10457 10458 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) { 10459 pmb = phba->sli.mbox_active; 10460 pmbox = &pmb->u.mb; 10461 mbox = phba->mbox; 10462 vport = pmb->vport; 10463 10464 /* First check out the status word */ 10465 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t)); 10466 if (pmbox->mbxOwner != OWN_HOST) { 10467 spin_unlock_irqrestore(&phba->hbalock, iflag); 10468 /* 10469 * Stray Mailbox Interrupt, mbxCommand <cmd> 10470 * mbxStatus <status> 10471 */ 10472 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 10473 LOG_SLI, 10474 "(%d):0304 Stray Mailbox " 10475 "Interrupt mbxCommand x%x " 10476 "mbxStatus x%x\n", 10477 (vport ? vport->vpi : 0), 10478 pmbox->mbxCommand, 10479 pmbox->mbxStatus); 10480 /* clear mailbox attention bit */ 10481 work_ha_copy &= ~HA_MBATT; 10482 } else { 10483 phba->sli.mbox_active = NULL; 10484 spin_unlock_irqrestore(&phba->hbalock, iflag); 10485 phba->last_completion_time = jiffies; 10486 del_timer(&phba->sli.mbox_tmo); 10487 if (pmb->mbox_cmpl) { 10488 lpfc_sli_pcimem_bcopy(mbox, pmbox, 10489 MAILBOX_CMD_SIZE); 10490 if (pmb->out_ext_byte_len && 10491 pmb->context2) 10492 lpfc_sli_pcimem_bcopy( 10493 phba->mbox_ext, 10494 pmb->context2, 10495 pmb->out_ext_byte_len); 10496 } 10497 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) { 10498 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG; 10499 10500 lpfc_debugfs_disc_trc(vport, 10501 LPFC_DISC_TRC_MBOX_VPORT, 10502 "MBOX dflt rpi: : " 10503 "status:x%x rpi:x%x", 10504 (uint32_t)pmbox->mbxStatus, 10505 pmbox->un.varWords[0], 0); 10506 10507 if (!pmbox->mbxStatus) { 10508 mp = (struct lpfc_dmabuf *) 10509 (pmb->context1); 10510 ndlp = (struct lpfc_nodelist *) 10511 pmb->context2; 10512 10513 /* Reg_LOGIN of dflt RPI was 10514 * successful. new lets get 10515 * rid of the RPI using the 10516 * same mbox buffer. 
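					 * (Note: the completed REG_LOGIN
					 * mailbox is rebuilt in place as an
					 * UNREG_LOGIN and re-issued with
					 * MBX_NOWAIT below, so no new mailbox
					 * allocation is needed in interrupt
					 * context.)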
10517 */ 10518 lpfc_unreg_login(phba, 10519 vport->vpi, 10520 pmbox->un.varWords[0], 10521 pmb); 10522 pmb->mbox_cmpl = 10523 lpfc_mbx_cmpl_dflt_rpi; 10524 pmb->context1 = mp; 10525 pmb->context2 = ndlp; 10526 pmb->vport = vport; 10527 rc = lpfc_sli_issue_mbox(phba, 10528 pmb, 10529 MBX_NOWAIT); 10530 if (rc != MBX_BUSY) 10531 lpfc_printf_log(phba, 10532 KERN_ERR, 10533 LOG_MBOX | LOG_SLI, 10534 "0350 rc should have" 10535 "been MBX_BUSY\n"); 10536 if (rc != MBX_NOT_FINISHED) 10537 goto send_current_mbox; 10538 } 10539 } 10540 spin_lock_irqsave( 10541 &phba->pport->work_port_lock, 10542 iflag); 10543 phba->pport->work_port_events &= 10544 ~WORKER_MBOX_TMO; 10545 spin_unlock_irqrestore( 10546 &phba->pport->work_port_lock, 10547 iflag); 10548 lpfc_mbox_cmpl_put(phba, pmb); 10549 } 10550 } else 10551 spin_unlock_irqrestore(&phba->hbalock, iflag); 10552 10553 if ((work_ha_copy & HA_MBATT) && 10554 (phba->sli.mbox_active == NULL)) { 10555 send_current_mbox: 10556 /* Process next mailbox command if there is one */ 10557 do { 10558 rc = lpfc_sli_issue_mbox(phba, NULL, 10559 MBX_NOWAIT); 10560 } while (rc == MBX_NOT_FINISHED); 10561 if (rc != MBX_SUCCESS) 10562 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 10563 LOG_SLI, "0349 rc should be " 10564 "MBX_SUCCESS\n"); 10565 } 10566 10567 spin_lock_irqsave(&phba->hbalock, iflag); 10568 phba->work_ha |= work_ha_copy; 10569 spin_unlock_irqrestore(&phba->hbalock, iflag); 10570 lpfc_worker_wake_up(phba); 10571 } 10572 return IRQ_HANDLED; 10573 unplug_error: 10574 spin_unlock_irqrestore(&phba->hbalock, iflag); 10575 return IRQ_HANDLED; 10576 10577 } /* lpfc_sli_sp_intr_handler */ 10578 10579 /** 10580 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device. 10581 * @irq: Interrupt number. 10582 * @dev_id: The device context pointer. 10583 * 10584 * This function is directly called from the PCI layer as an interrupt 10585 * service routine when device with SLI-3 interface spec is enabled with 10586 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB 10587 * ring event in the HBA. However, when the device is enabled with either 10588 * MSI or Pin-IRQ interrupt mode, this function is called as part of the 10589 * device-level interrupt handler. When the PCI slot is in error recovery 10590 * or the HBA is undergoing initialization, the interrupt handler will not 10591 * process the interrupt. The SCSI FCP fast-path ring event are handled in 10592 * the intrrupt context. This function is called without any lock held. 10593 * It gets the hbalock to access and update SLI data structures. 10594 * 10595 * This function returns IRQ_HANDLED when interrupt is handled else it 10596 * returns IRQ_NONE. 10597 **/ 10598 irqreturn_t 10599 lpfc_sli_fp_intr_handler(int irq, void *dev_id) 10600 { 10601 struct lpfc_hba *phba; 10602 uint32_t ha_copy; 10603 unsigned long status; 10604 unsigned long iflag; 10605 10606 /* Get the driver's phba structure from the dev_id and 10607 * assume the HBA is not interrupting. 
10608 */ 10609 phba = (struct lpfc_hba *) dev_id; 10610 10611 if (unlikely(!phba)) 10612 return IRQ_NONE; 10613 10614 /* 10615 * Stuff needs to be attented to when this function is invoked as an 10616 * individual interrupt handler in MSI-X multi-message interrupt mode 10617 */ 10618 if (phba->intr_type == MSIX) { 10619 /* Check device state for handling interrupt */ 10620 if (lpfc_intr_state_check(phba)) 10621 return IRQ_NONE; 10622 /* Need to read HA REG for FCP ring and other ring events */ 10623 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 10624 return IRQ_HANDLED; 10625 /* Clear up only attention source related to fast-path */ 10626 spin_lock_irqsave(&phba->hbalock, iflag); 10627 /* 10628 * If there is deferred error attention, do not check for 10629 * any interrupt. 10630 */ 10631 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 10632 spin_unlock_irqrestore(&phba->hbalock, iflag); 10633 return IRQ_NONE; 10634 } 10635 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)), 10636 phba->HAregaddr); 10637 readl(phba->HAregaddr); /* flush */ 10638 spin_unlock_irqrestore(&phba->hbalock, iflag); 10639 } else 10640 ha_copy = phba->ha_copy; 10641 10642 /* 10643 * Process all events on FCP ring. Take the optimized path for FCP IO. 10644 */ 10645 ha_copy &= ~(phba->work_ha_mask); 10646 10647 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING))); 10648 status >>= (4*LPFC_FCP_RING); 10649 if (status & HA_RXMASK) 10650 lpfc_sli_handle_fast_ring_event(phba, 10651 &phba->sli.ring[LPFC_FCP_RING], 10652 status); 10653 10654 if (phba->cfg_multi_ring_support == 2) { 10655 /* 10656 * Process all events on extra ring. Take the optimized path 10657 * for extra ring IO. 10658 */ 10659 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING))); 10660 status >>= (4*LPFC_EXTRA_RING); 10661 if (status & HA_RXMASK) { 10662 lpfc_sli_handle_fast_ring_event(phba, 10663 &phba->sli.ring[LPFC_EXTRA_RING], 10664 status); 10665 } 10666 } 10667 return IRQ_HANDLED; 10668 } /* lpfc_sli_fp_intr_handler */ 10669 10670 /** 10671 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device 10672 * @irq: Interrupt number. 10673 * @dev_id: The device context pointer. 10674 * 10675 * This function is the HBA device-level interrupt handler to device with 10676 * SLI-3 interface spec, called from the PCI layer when either MSI or 10677 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which 10678 * requires driver attention. This function invokes the slow-path interrupt 10679 * attention handling function and fast-path interrupt attention handling 10680 * function in turn to process the relevant HBA attention events. This 10681 * function is called without any lock held. It gets the hbalock to access 10682 * and update SLI data structures. 10683 * 10684 * This function returns IRQ_HANDLED when interrupt is handled, else it 10685 * returns IRQ_NONE. 10686 **/ 10687 irqreturn_t 10688 lpfc_sli_intr_handler(int irq, void *dev_id) 10689 { 10690 struct lpfc_hba *phba; 10691 irqreturn_t sp_irq_rc, fp_irq_rc; 10692 unsigned long status1, status2; 10693 uint32_t hc_copy; 10694 10695 /* 10696 * Get the driver's phba structure from the dev_id and 10697 * assume the HBA is not interrupting. 
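	 * (Descriptive note: this device-level handler reads HA once under
	 * hbalock, derives slow-path work from the MBATT/LATT/ERATT and
	 * ELS-ring bits and fast-path work from the FCP and extra-ring bits,
	 * and then delegates to the two per-path handlers below.)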
10698 */ 10699 phba = (struct lpfc_hba *) dev_id; 10700 10701 if (unlikely(!phba)) 10702 return IRQ_NONE; 10703 10704 /* Check device state for handling interrupt */ 10705 if (lpfc_intr_state_check(phba)) 10706 return IRQ_NONE; 10707 10708 spin_lock(&phba->hbalock); 10709 if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) { 10710 spin_unlock(&phba->hbalock); 10711 return IRQ_HANDLED; 10712 } 10713 10714 if (unlikely(!phba->ha_copy)) { 10715 spin_unlock(&phba->hbalock); 10716 return IRQ_NONE; 10717 } else if (phba->ha_copy & HA_ERATT) { 10718 if (phba->hba_flag & HBA_ERATT_HANDLED) 10719 /* ERATT polling has handled ERATT */ 10720 phba->ha_copy &= ~HA_ERATT; 10721 else 10722 /* Indicate interrupt handler handles ERATT */ 10723 phba->hba_flag |= HBA_ERATT_HANDLED; 10724 } 10725 10726 /* 10727 * If there is deferred error attention, do not check for any interrupt. 10728 */ 10729 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 10730 spin_unlock(&phba->hbalock); 10731 return IRQ_NONE; 10732 } 10733 10734 /* Clear attention sources except link and error attentions */ 10735 if (lpfc_readl(phba->HCregaddr, &hc_copy)) { 10736 spin_unlock(&phba->hbalock); 10737 return IRQ_HANDLED; 10738 } 10739 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA 10740 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA), 10741 phba->HCregaddr); 10742 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr); 10743 writel(hc_copy, phba->HCregaddr); 10744 readl(phba->HAregaddr); /* flush */ 10745 spin_unlock(&phba->hbalock); 10746 10747 /* 10748 * Invokes slow-path host attention interrupt handling as appropriate. 10749 */ 10750 10751 /* status of events with mailbox and link attention */ 10752 status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT); 10753 10754 /* status of events with ELS ring */ 10755 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING))); 10756 status2 >>= (4*LPFC_ELS_RING); 10757 10758 if (status1 || (status2 & HA_RXMASK)) 10759 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id); 10760 else 10761 sp_irq_rc = IRQ_NONE; 10762 10763 /* 10764 * Invoke fast-path host attention interrupt handling as appropriate. 10765 */ 10766 10767 /* status of events with FCP ring */ 10768 status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING))); 10769 status1 >>= (4*LPFC_FCP_RING); 10770 10771 /* status of events with extra ring */ 10772 if (phba->cfg_multi_ring_support == 2) { 10773 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING))); 10774 status2 >>= (4*LPFC_EXTRA_RING); 10775 } else 10776 status2 = 0; 10777 10778 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK)) 10779 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id); 10780 else 10781 fp_irq_rc = IRQ_NONE; 10782 10783 /* Return device-level interrupt handling status */ 10784 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc; 10785 } /* lpfc_sli_intr_handler */ 10786 10787 /** 10788 * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event 10789 * @phba: pointer to lpfc hba data structure. 10790 * 10791 * This routine is invoked by the worker thread to process all the pending 10792 * SLI4 FCP abort XRI events. 
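 *
 * A hedged usage sketch (the real call site is the worker thread; the flag
 * test shown here is only for illustration):
 *
 *	if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
 *		lpfc_sli4_fcp_xri_abort_event_proc(phba);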
10793 **/ 10794 void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba) 10795 { 10796 struct lpfc_cq_event *cq_event; 10797 10798 /* First, declare the fcp xri abort event has been handled */ 10799 spin_lock_irq(&phba->hbalock); 10800 phba->hba_flag &= ~FCP_XRI_ABORT_EVENT; 10801 spin_unlock_irq(&phba->hbalock); 10802 /* Now, handle all the fcp xri abort events */ 10803 while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) { 10804 /* Get the first event from the head of the event queue */ 10805 spin_lock_irq(&phba->hbalock); 10806 list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue, 10807 cq_event, struct lpfc_cq_event, list); 10808 spin_unlock_irq(&phba->hbalock); 10809 /* Notify aborted XRI for FCP work queue */ 10810 lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri); 10811 /* Free the event processed back to the free pool */ 10812 lpfc_sli4_cq_event_release(phba, cq_event); 10813 } 10814 } 10815 10816 /** 10817 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event 10818 * @phba: pointer to lpfc hba data structure. 10819 * 10820 * This routine is invoked by the worker thread to process all the pending 10821 * SLI4 els abort xri events. 10822 **/ 10823 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba) 10824 { 10825 struct lpfc_cq_event *cq_event; 10826 10827 /* First, declare the els xri abort event has been handled */ 10828 spin_lock_irq(&phba->hbalock); 10829 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT; 10830 spin_unlock_irq(&phba->hbalock); 10831 /* Now, handle all the els xri abort events */ 10832 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) { 10833 /* Get the first event from the head of the event queue */ 10834 spin_lock_irq(&phba->hbalock); 10835 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue, 10836 cq_event, struct lpfc_cq_event, list); 10837 spin_unlock_irq(&phba->hbalock); 10838 /* Notify aborted XRI for ELS work queue */ 10839 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri); 10840 /* Free the event processed back to the free pool */ 10841 lpfc_sli4_cq_event_release(phba, cq_event); 10842 } 10843 } 10844 10845 /** 10846 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn 10847 * @phba: pointer to lpfc hba data structure 10848 * @pIocbIn: pointer to the rspiocbq 10849 * @pIocbOut: pointer to the cmdiocbq 10850 * @wcqe: pointer to the complete wcqe 10851 * 10852 * This routine transfers the fields of a command iocbq to a response iocbq 10853 * by copying all the IOCB fields from command iocbq and transferring the 10854 * completion status information from the complete wcqe. 
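 *
 * (Descriptive note: the copy below starts at offsetof(struct lpfc_iocbq,
 * iocb), so everything from the IOCB payload onward is duplicated while the
 * driver-private list and tag fields at the front of the response iocbq are
 * left untouched.)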
10855 **/ 10856 static void 10857 lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba, 10858 struct lpfc_iocbq *pIocbIn, 10859 struct lpfc_iocbq *pIocbOut, 10860 struct lpfc_wcqe_complete *wcqe) 10861 { 10862 unsigned long iflags; 10863 uint32_t status; 10864 size_t offset = offsetof(struct lpfc_iocbq, iocb); 10865 10866 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset, 10867 sizeof(struct lpfc_iocbq) - offset); 10868 /* Map WCQE parameters into irspiocb parameters */ 10869 status = bf_get(lpfc_wcqe_c_status, wcqe); 10870 pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK); 10871 if (pIocbOut->iocb_flag & LPFC_IO_FCP) 10872 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR) 10873 pIocbIn->iocb.un.fcpi.fcpi_parm = 10874 pIocbOut->iocb.un.fcpi.fcpi_parm - 10875 wcqe->total_data_placed; 10876 else 10877 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; 10878 else { 10879 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; 10880 pIocbIn->iocb.un.genreq64.bdl.bdeSize = wcqe->total_data_placed; 10881 } 10882 10883 /* Convert BG errors for completion status */ 10884 if (status == CQE_STATUS_DI_ERROR) { 10885 pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT; 10886 10887 if (bf_get(lpfc_wcqe_c_bg_edir, wcqe)) 10888 pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED; 10889 else 10890 pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED; 10891 10892 pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0; 10893 if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */ 10894 pIocbIn->iocb.unsli3.sli3_bg.bgstat |= 10895 BGS_GUARD_ERR_MASK; 10896 if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */ 10897 pIocbIn->iocb.unsli3.sli3_bg.bgstat |= 10898 BGS_APPTAG_ERR_MASK; 10899 if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */ 10900 pIocbIn->iocb.unsli3.sli3_bg.bgstat |= 10901 BGS_REFTAG_ERR_MASK; 10902 10903 /* Check to see if there was any good data before the error */ 10904 if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) { 10905 pIocbIn->iocb.unsli3.sli3_bg.bgstat |= 10906 BGS_HI_WATER_MARK_PRESENT_MASK; 10907 pIocbIn->iocb.unsli3.sli3_bg.bghm = 10908 wcqe->total_data_placed; 10909 } 10910 10911 /* 10912 * Set ALL the error bits to indicate we don't know what 10913 * type of error it is. 10914 */ 10915 if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat) 10916 pIocbIn->iocb.unsli3.sli3_bg.bgstat |= 10917 (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK | 10918 BGS_GUARD_ERR_MASK); 10919 } 10920 10921 /* Pick up HBA exchange busy condition */ 10922 if (bf_get(lpfc_wcqe_c_xb, wcqe)) { 10923 spin_lock_irqsave(&phba->hbalock, iflags); 10924 pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY; 10925 spin_unlock_irqrestore(&phba->hbalock, iflags); 10926 } 10927 } 10928 10929 /** 10930 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe 10931 * @phba: Pointer to HBA context object. 10932 * @wcqe: Pointer to work-queue completion queue entry. 10933 * 10934 * This routine handles an ELS work-queue completion event and construct 10935 * a pseudo response ELS IODBQ from the SLI4 ELS WCQE for the common 10936 * discovery engine to handle. 10937 * 10938 * Return: Pointer to the receive IOCBQ, NULL otherwise. 
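 *
 * Illustrative caller sequence (a sketch, not verbatim driver code; the
 * irspiocbq is assumed to already carry the saved WCQE in its cq_event):
 *
 *	irspiocbq = lpfc_sli_get_iocbq(phba);
 *	if (irspiocbq)
 *		irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba, irspiocbq);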
10939 **/ 10940 static struct lpfc_iocbq * 10941 lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba, 10942 struct lpfc_iocbq *irspiocbq) 10943 { 10944 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 10945 struct lpfc_iocbq *cmdiocbq; 10946 struct lpfc_wcqe_complete *wcqe; 10947 unsigned long iflags; 10948 10949 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl; 10950 spin_lock_irqsave(&phba->hbalock, iflags); 10951 pring->stats.iocb_event++; 10952 /* Look up the ELS command IOCB and create pseudo response IOCB */ 10953 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, 10954 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 10955 spin_unlock_irqrestore(&phba->hbalock, iflags); 10956 10957 if (unlikely(!cmdiocbq)) { 10958 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 10959 "0386 ELS complete with no corresponding " 10960 "cmdiocb: iotag (%d)\n", 10961 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 10962 lpfc_sli_release_iocbq(phba, irspiocbq); 10963 return NULL; 10964 } 10965 10966 /* Fake the irspiocbq and copy necessary response information */ 10967 lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe); 10968 10969 return irspiocbq; 10970 } 10971 10972 /** 10973 * lpfc_sli4_sp_handle_async_event - Handle an asynchroous event 10974 * @phba: Pointer to HBA context object. 10975 * @cqe: Pointer to mailbox completion queue entry. 10976 * 10977 * This routine process a mailbox completion queue entry with asynchrous 10978 * event. 10979 * 10980 * Return: true if work posted to worker thread, otherwise false. 10981 **/ 10982 static bool 10983 lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe) 10984 { 10985 struct lpfc_cq_event *cq_event; 10986 unsigned long iflags; 10987 10988 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 10989 "0392 Async Event: word0:x%x, word1:x%x, " 10990 "word2:x%x, word3:x%x\n", mcqe->word0, 10991 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer); 10992 10993 /* Allocate a new internal CQ_EVENT entry */ 10994 cq_event = lpfc_sli4_cq_event_alloc(phba); 10995 if (!cq_event) { 10996 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 10997 "0394 Failed to allocate CQ_EVENT entry\n"); 10998 return false; 10999 } 11000 11001 /* Move the CQE into an asynchronous event entry */ 11002 memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe)); 11003 spin_lock_irqsave(&phba->hbalock, iflags); 11004 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue); 11005 /* Set the async event flag */ 11006 phba->hba_flag |= ASYNC_EVENT; 11007 spin_unlock_irqrestore(&phba->hbalock, iflags); 11008 11009 return true; 11010 } 11011 11012 /** 11013 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event 11014 * @phba: Pointer to HBA context object. 11015 * @cqe: Pointer to mailbox completion queue entry. 11016 * 11017 * This routine process a mailbox completion queue entry with mailbox 11018 * completion event. 11019 * 11020 * Return: true if work posted to worker thread, otherwise false. 
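 *
 * (Descriptive note: when the MCQE reports an error but the MQE status still
 * reads MBX_SUCCESS, the routine folds the MCQE status into the MQE as
 * (LPFC_MBX_ERROR_RANGE | mcqe_status) so the completion handler cannot miss
 * the failure.)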
11021 **/ 11022 static bool 11023 lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe) 11024 { 11025 uint32_t mcqe_status; 11026 MAILBOX_t *mbox, *pmbox; 11027 struct lpfc_mqe *mqe; 11028 struct lpfc_vport *vport; 11029 struct lpfc_nodelist *ndlp; 11030 struct lpfc_dmabuf *mp; 11031 unsigned long iflags; 11032 LPFC_MBOXQ_t *pmb; 11033 bool workposted = false; 11034 int rc; 11035 11036 /* If not a mailbox complete MCQE, out by checking mailbox consume */ 11037 if (!bf_get(lpfc_trailer_completed, mcqe)) 11038 goto out_no_mqe_complete; 11039 11040 /* Get the reference to the active mbox command */ 11041 spin_lock_irqsave(&phba->hbalock, iflags); 11042 pmb = phba->sli.mbox_active; 11043 if (unlikely(!pmb)) { 11044 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 11045 "1832 No pending MBOX command to handle\n"); 11046 spin_unlock_irqrestore(&phba->hbalock, iflags); 11047 goto out_no_mqe_complete; 11048 } 11049 spin_unlock_irqrestore(&phba->hbalock, iflags); 11050 mqe = &pmb->u.mqe; 11051 pmbox = (MAILBOX_t *)&pmb->u.mqe; 11052 mbox = phba->mbox; 11053 vport = pmb->vport; 11054 11055 /* Reset heartbeat timer */ 11056 phba->last_completion_time = jiffies; 11057 del_timer(&phba->sli.mbox_tmo); 11058 11059 /* Move mbox data to caller's mailbox region, do endian swapping */ 11060 if (pmb->mbox_cmpl && mbox) 11061 lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe)); 11062 11063 /* 11064 * For mcqe errors, conditionally move a modified error code to 11065 * the mbox so that the error will not be missed. 11066 */ 11067 mcqe_status = bf_get(lpfc_mcqe_status, mcqe); 11068 if (mcqe_status != MB_CQE_STATUS_SUCCESS) { 11069 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS) 11070 bf_set(lpfc_mqe_status, mqe, 11071 (LPFC_MBX_ERROR_RANGE | mcqe_status)); 11072 } 11073 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) { 11074 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG; 11075 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT, 11076 "MBOX dflt rpi: status:x%x rpi:x%x", 11077 mcqe_status, 11078 pmbox->un.varWords[0], 0); 11079 if (mcqe_status == MB_CQE_STATUS_SUCCESS) { 11080 mp = (struct lpfc_dmabuf *)(pmb->context1); 11081 ndlp = (struct lpfc_nodelist *)pmb->context2; 11082 /* Reg_LOGIN of dflt RPI was successful. Now lets get 11083 * RID of the PPI using the same mbox buffer. 
11084 */ 11085 lpfc_unreg_login(phba, vport->vpi, 11086 pmbox->un.varWords[0], pmb); 11087 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; 11088 pmb->context1 = mp; 11089 pmb->context2 = ndlp; 11090 pmb->vport = vport; 11091 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 11092 if (rc != MBX_BUSY) 11093 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 11094 LOG_SLI, "0385 rc should " 11095 "have been MBX_BUSY\n"); 11096 if (rc != MBX_NOT_FINISHED) 11097 goto send_current_mbox; 11098 } 11099 } 11100 spin_lock_irqsave(&phba->pport->work_port_lock, iflags); 11101 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 11102 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags); 11103 11104 /* There is mailbox completion work to do */ 11105 spin_lock_irqsave(&phba->hbalock, iflags); 11106 __lpfc_mbox_cmpl_put(phba, pmb); 11107 phba->work_ha |= HA_MBATT; 11108 spin_unlock_irqrestore(&phba->hbalock, iflags); 11109 workposted = true; 11110 11111 send_current_mbox: 11112 spin_lock_irqsave(&phba->hbalock, iflags); 11113 /* Release the mailbox command posting token */ 11114 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 11115 /* Setting active mailbox pointer need to be in sync to flag clear */ 11116 phba->sli.mbox_active = NULL; 11117 spin_unlock_irqrestore(&phba->hbalock, iflags); 11118 /* Wake up worker thread to post the next pending mailbox command */ 11119 lpfc_worker_wake_up(phba); 11120 out_no_mqe_complete: 11121 if (bf_get(lpfc_trailer_consumed, mcqe)) 11122 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq); 11123 return workposted; 11124 } 11125 11126 /** 11127 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry 11128 * @phba: Pointer to HBA context object. 11129 * @cqe: Pointer to mailbox completion queue entry. 11130 * 11131 * This routine process a mailbox completion queue entry, it invokes the 11132 * proper mailbox complete handling or asynchrous event handling routine 11133 * according to the MCQE's async bit. 11134 * 11135 * Return: true if work posted to worker thread, otherwise false. 11136 **/ 11137 static bool 11138 lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe) 11139 { 11140 struct lpfc_mcqe mcqe; 11141 bool workposted; 11142 11143 /* Copy the mailbox MCQE and convert endian order as needed */ 11144 lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe)); 11145 11146 /* Invoke the proper event handling routine */ 11147 if (!bf_get(lpfc_trailer_async, &mcqe)) 11148 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe); 11149 else 11150 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe); 11151 return workposted; 11152 } 11153 11154 /** 11155 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event 11156 * @phba: Pointer to HBA context object. 11157 * @wcqe: Pointer to work-queue completion queue entry. 11158 * 11159 * This routine handles an ELS work-queue completion event. 11160 * 11161 * Return: true if work posted to worker thread, otherwise false. 
11162 **/ 11163 static bool 11164 lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, 11165 struct lpfc_wcqe_complete *wcqe) 11166 { 11167 struct lpfc_iocbq *irspiocbq; 11168 unsigned long iflags; 11169 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING]; 11170 11171 /* Get an irspiocbq for later ELS response processing use */ 11172 irspiocbq = lpfc_sli_get_iocbq(phba); 11173 if (!irspiocbq) { 11174 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11175 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d " 11176 "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n", 11177 pring->txq_cnt, phba->iocb_cnt, 11178 phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt, 11179 phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt); 11180 return false; 11181 } 11182 11183 /* Save off the slow-path queue event for work thread to process */ 11184 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe)); 11185 spin_lock_irqsave(&phba->hbalock, iflags); 11186 list_add_tail(&irspiocbq->cq_event.list, 11187 &phba->sli4_hba.sp_queue_event); 11188 phba->hba_flag |= HBA_SP_QUEUE_EVT; 11189 spin_unlock_irqrestore(&phba->hbalock, iflags); 11190 11191 return true; 11192 } 11193 11194 /** 11195 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event 11196 * @phba: Pointer to HBA context object. 11197 * @wcqe: Pointer to work-queue completion queue entry. 11198 * 11199 * This routine handles slow-path WQ entry comsumed event by invoking the 11200 * proper WQ release routine to the slow-path WQ. 11201 **/ 11202 static void 11203 lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba, 11204 struct lpfc_wcqe_release *wcqe) 11205 { 11206 /* sanity check on queue memory */ 11207 if (unlikely(!phba->sli4_hba.els_wq)) 11208 return; 11209 /* Check for the slow-path ELS work queue */ 11210 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id) 11211 lpfc_sli4_wq_release(phba->sli4_hba.els_wq, 11212 bf_get(lpfc_wcqe_r_wqe_index, wcqe)); 11213 else 11214 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 11215 "2579 Slow-path wqe consume event carries " 11216 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n", 11217 bf_get(lpfc_wcqe_r_wqe_index, wcqe), 11218 phba->sli4_hba.els_wq->queue_id); 11219 } 11220 11221 /** 11222 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event 11223 * @phba: Pointer to HBA context object. 11224 * @cq: Pointer to a WQ completion queue. 11225 * @wcqe: Pointer to work-queue completion queue entry. 11226 * 11227 * This routine handles an XRI abort event. 11228 * 11229 * Return: true if work posted to worker thread, otherwise false. 
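 *
 * (Descriptive note: the aborted-XRI CQE is copied into a driver cq_event and
 * queued to either the FCP or the ELS aborted-XRI work list based on
 * cq->subtype; the worker thread then finishes the cleanup outside interrupt
 * context.)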
11230 **/ 11231 static bool 11232 lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba, 11233 struct lpfc_queue *cq, 11234 struct sli4_wcqe_xri_aborted *wcqe) 11235 { 11236 bool workposted = false; 11237 struct lpfc_cq_event *cq_event; 11238 unsigned long iflags; 11239 11240 /* Allocate a new internal CQ_EVENT entry */ 11241 cq_event = lpfc_sli4_cq_event_alloc(phba); 11242 if (!cq_event) { 11243 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11244 "0602 Failed to allocate CQ_EVENT entry\n"); 11245 return false; 11246 } 11247 11248 /* Move the CQE into the proper xri abort event list */ 11249 memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted)); 11250 switch (cq->subtype) { 11251 case LPFC_FCP: 11252 spin_lock_irqsave(&phba->hbalock, iflags); 11253 list_add_tail(&cq_event->list, 11254 &phba->sli4_hba.sp_fcp_xri_aborted_work_queue); 11255 /* Set the fcp xri abort event flag */ 11256 phba->hba_flag |= FCP_XRI_ABORT_EVENT; 11257 spin_unlock_irqrestore(&phba->hbalock, iflags); 11258 workposted = true; 11259 break; 11260 case LPFC_ELS: 11261 spin_lock_irqsave(&phba->hbalock, iflags); 11262 list_add_tail(&cq_event->list, 11263 &phba->sli4_hba.sp_els_xri_aborted_work_queue); 11264 /* Set the els xri abort event flag */ 11265 phba->hba_flag |= ELS_XRI_ABORT_EVENT; 11266 spin_unlock_irqrestore(&phba->hbalock, iflags); 11267 workposted = true; 11268 break; 11269 default: 11270 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11271 "0603 Invalid work queue CQE subtype (x%x)\n", 11272 cq->subtype); 11273 workposted = false; 11274 break; 11275 } 11276 return workposted; 11277 } 11278 11279 /** 11280 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry 11281 * @phba: Pointer to HBA context object. 11282 * @rcqe: Pointer to receive-queue completion queue entry. 11283 * 11284 * This routine process a receive-queue completion queue entry. 11285 * 11286 * Return: true if work posted to worker thread, otherwise false. 
11287 **/ 11288 static bool 11289 lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) 11290 { 11291 bool workposted = false; 11292 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq; 11293 struct lpfc_queue *drq = phba->sli4_hba.dat_rq; 11294 struct hbq_dmabuf *dma_buf; 11295 uint32_t status, rq_id; 11296 unsigned long iflags; 11297 11298 /* sanity check on queue memory */ 11299 if (unlikely(!hrq) || unlikely(!drq)) 11300 return workposted; 11301 11302 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1) 11303 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe); 11304 else 11305 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe); 11306 if (rq_id != hrq->queue_id) 11307 goto out; 11308 11309 status = bf_get(lpfc_rcqe_status, rcqe); 11310 switch (status) { 11311 case FC_STATUS_RQ_BUF_LEN_EXCEEDED: 11312 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11313 "2537 Receive Frame Truncated!!\n"); 11314 case FC_STATUS_RQ_SUCCESS: 11315 lpfc_sli4_rq_release(hrq, drq); 11316 spin_lock_irqsave(&phba->hbalock, iflags); 11317 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list); 11318 if (!dma_buf) { 11319 spin_unlock_irqrestore(&phba->hbalock, iflags); 11320 goto out; 11321 } 11322 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe)); 11323 /* save off the frame for the word thread to process */ 11324 list_add_tail(&dma_buf->cq_event.list, 11325 &phba->sli4_hba.sp_queue_event); 11326 /* Frame received */ 11327 phba->hba_flag |= HBA_SP_QUEUE_EVT; 11328 spin_unlock_irqrestore(&phba->hbalock, iflags); 11329 workposted = true; 11330 break; 11331 case FC_STATUS_INSUFF_BUF_NEED_BUF: 11332 case FC_STATUS_INSUFF_BUF_FRM_DISC: 11333 /* Post more buffers if possible */ 11334 spin_lock_irqsave(&phba->hbalock, iflags); 11335 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER; 11336 spin_unlock_irqrestore(&phba->hbalock, iflags); 11337 workposted = true; 11338 break; 11339 } 11340 out: 11341 return workposted; 11342 } 11343 11344 /** 11345 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry 11346 * @phba: Pointer to HBA context object. 11347 * @cq: Pointer to the completion queue. 11348 * @wcqe: Pointer to a completion queue entry. 11349 * 11350 * This routine process a slow-path work-queue or receive queue completion queue 11351 * entry. 11352 * 11353 * Return: true if work posted to worker thread, otherwise false. 
11354 **/
11355 static bool
11356 lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
11357 			 struct lpfc_cqe *cqe)
11358 {
11359 	struct lpfc_cqe cqevt;
11360 	bool workposted = false;
11361 
11362 	/* Copy the work queue CQE and convert endian order if needed */
11363 	lpfc_sli_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
11364 
11365 	/* Check for the different types of WCQE and dispatch */
11366 	switch (bf_get(lpfc_cqe_code, &cqevt)) {
11367 	case CQE_CODE_COMPL_WQE:
11368 		/* Process the WQ/RQ complete event */
11369 		phba->last_completion_time = jiffies;
11370 		workposted = lpfc_sli4_sp_handle_els_wcqe(phba,
11371 				(struct lpfc_wcqe_complete *)&cqevt);
11372 		break;
11373 	case CQE_CODE_RELEASE_WQE:
11374 		/* Process the WQ release event */
11375 		lpfc_sli4_sp_handle_rel_wcqe(phba,
11376 				(struct lpfc_wcqe_release *)&cqevt);
11377 		break;
11378 	case CQE_CODE_XRI_ABORTED:
11379 		/* Process the WQ XRI abort event */
11380 		phba->last_completion_time = jiffies;
11381 		workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
11382 				(struct sli4_wcqe_xri_aborted *)&cqevt);
11383 		break;
11384 	case CQE_CODE_RECEIVE:
11385 	case CQE_CODE_RECEIVE_V1:
11386 		/* Process the RQ event */
11387 		phba->last_completion_time = jiffies;
11388 		workposted = lpfc_sli4_sp_handle_rcqe(phba,
11389 				(struct lpfc_rcqe *)&cqevt);
11390 		break;
11391 	default:
11392 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11393 				"0388 Not a valid WCQE code: x%x\n",
11394 				bf_get(lpfc_cqe_code, &cqevt));
11395 		break;
11396 	}
11397 	return workposted;
11398 }
11399 
11400 /**
11401  * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
11402  * @phba: Pointer to HBA context object.
11403  * @eqe: Pointer to slow-path event queue entry.
11404  *
11405  * This routine processes an event queue entry from the slow-path event queue.
11406  * It checks the MajorCode and MinorCode to determine whether this is a
11407  * completion event on a completion queue; if not, an error is logged and the
11408  * routine simply returns. Otherwise, it looks up the corresponding completion
11409  * queue, processes all the entries on that completion queue, rearms the
11410  * completion queue, and then returns.
11411 * 11412 **/ 11413 static void 11414 lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe) 11415 { 11416 struct lpfc_queue *cq = NULL, *childq, *speq; 11417 struct lpfc_cqe *cqe; 11418 bool workposted = false; 11419 int ecount = 0; 11420 uint16_t cqid; 11421 11422 if (bf_get_le32(lpfc_eqe_major_code, eqe) != 0) { 11423 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11424 "0359 Not a valid slow-path completion " 11425 "event: majorcode=x%x, minorcode=x%x\n", 11426 bf_get_le32(lpfc_eqe_major_code, eqe), 11427 bf_get_le32(lpfc_eqe_minor_code, eqe)); 11428 return; 11429 } 11430 11431 /* Get the reference to the corresponding CQ */ 11432 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); 11433 11434 /* Search for completion queue pointer matching this cqid */ 11435 speq = phba->sli4_hba.sp_eq; 11436 /* sanity check on queue memory */ 11437 if (unlikely(!speq)) 11438 return; 11439 list_for_each_entry(childq, &speq->child_list, list) { 11440 if (childq->queue_id == cqid) { 11441 cq = childq; 11442 break; 11443 } 11444 } 11445 if (unlikely(!cq)) { 11446 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) 11447 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11448 "0365 Slow-path CQ identifier " 11449 "(%d) does not exist\n", cqid); 11450 return; 11451 } 11452 11453 /* Process all the entries to the CQ */ 11454 switch (cq->type) { 11455 case LPFC_MCQ: 11456 while ((cqe = lpfc_sli4_cq_get(cq))) { 11457 workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe); 11458 if (!(++ecount % cq->entry_repost)) 11459 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); 11460 } 11461 break; 11462 case LPFC_WCQ: 11463 while ((cqe = lpfc_sli4_cq_get(cq))) { 11464 if (cq->subtype == LPFC_FCP) 11465 workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, 11466 cqe); 11467 else 11468 workposted |= lpfc_sli4_sp_handle_cqe(phba, cq, 11469 cqe); 11470 if (!(++ecount % cq->entry_repost)) 11471 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); 11472 } 11473 break; 11474 default: 11475 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11476 "0370 Invalid completion queue type (%d)\n", 11477 cq->type); 11478 return; 11479 } 11480 11481 /* Catch the no cq entry condition, log an error */ 11482 if (unlikely(ecount == 0)) 11483 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11484 "0371 No entry from the CQ: identifier " 11485 "(x%x), type (%d)\n", cq->queue_id, cq->type); 11486 11487 /* In any case, flash and re-arm the RCQ */ 11488 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM); 11489 11490 /* wake up worker thread if there are works to be done */ 11491 if (workposted) 11492 lpfc_worker_wake_up(phba); 11493 } 11494 11495 /** 11496 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry 11497 * @eqe: Pointer to fast-path completion queue entry. 11498 * 11499 * This routine process a fast-path work queue completion entry from fast-path 11500 * event queue for FCP command response completion. 11501 **/ 11502 static void 11503 lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, 11504 struct lpfc_wcqe_complete *wcqe) 11505 { 11506 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING]; 11507 struct lpfc_iocbq *cmdiocbq; 11508 struct lpfc_iocbq irspiocbq; 11509 unsigned long iflags; 11510 11511 spin_lock_irqsave(&phba->hbalock, iflags); 11512 pring->stats.iocb_event++; 11513 spin_unlock_irqrestore(&phba->hbalock, iflags); 11514 11515 /* Check for response status */ 11516 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) { 11517 /* If resource errors reported from HBA, reduce queue 11518 * depth of the SCSI device. 
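		 * (Descriptive note: phba->lpfc_rampdown_queue_depth() below
		 * throttles the SCSI queue depth when the HBA returns
		 * IOSTAT_LOCAL_REJECT with IOERR_NO_RESOURCES, limiting new
		 * I/O until resources recover.)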
11519 */ 11520 if ((bf_get(lpfc_wcqe_c_status, wcqe) == 11521 IOSTAT_LOCAL_REJECT) && 11522 (wcqe->parameter == IOERR_NO_RESOURCES)) { 11523 phba->lpfc_rampdown_queue_depth(phba); 11524 } 11525 /* Log the error status */ 11526 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 11527 "0373 FCP complete error: status=x%x, " 11528 "hw_status=x%x, total_data_specified=%d, " 11529 "parameter=x%x, word3=x%x\n", 11530 bf_get(lpfc_wcqe_c_status, wcqe), 11531 bf_get(lpfc_wcqe_c_hw_status, wcqe), 11532 wcqe->total_data_placed, wcqe->parameter, 11533 wcqe->word3); 11534 } 11535 11536 /* Look up the FCP command IOCB and create pseudo response IOCB */ 11537 spin_lock_irqsave(&phba->hbalock, iflags); 11538 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, 11539 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 11540 spin_unlock_irqrestore(&phba->hbalock, iflags); 11541 if (unlikely(!cmdiocbq)) { 11542 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 11543 "0374 FCP complete with no corresponding " 11544 "cmdiocb: iotag (%d)\n", 11545 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 11546 return; 11547 } 11548 if (unlikely(!cmdiocbq->iocb_cmpl)) { 11549 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 11550 "0375 FCP cmdiocb not callback function " 11551 "iotag: (%d)\n", 11552 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 11553 return; 11554 } 11555 11556 /* Fake the irspiocb and copy necessary response information */ 11557 lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe); 11558 11559 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) { 11560 spin_lock_irqsave(&phba->hbalock, iflags); 11561 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED; 11562 spin_unlock_irqrestore(&phba->hbalock, iflags); 11563 } 11564 11565 /* Pass the cmd_iocb and the rsp state to the upper layer */ 11566 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq); 11567 } 11568 11569 /** 11570 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event 11571 * @phba: Pointer to HBA context object. 11572 * @cq: Pointer to completion queue. 11573 * @wcqe: Pointer to work-queue completion queue entry. 11574 * 11575 * This routine handles an fast-path WQ entry comsumed event by invoking the 11576 * proper WQ release routine to the slow-path WQ. 11577 **/ 11578 static void 11579 lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 11580 struct lpfc_wcqe_release *wcqe) 11581 { 11582 struct lpfc_queue *childwq; 11583 bool wqid_matched = false; 11584 uint16_t fcp_wqid; 11585 11586 /* Check for fast-path FCP work queue release */ 11587 fcp_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe); 11588 list_for_each_entry(childwq, &cq->child_list, list) { 11589 if (childwq->queue_id == fcp_wqid) { 11590 lpfc_sli4_wq_release(childwq, 11591 bf_get(lpfc_wcqe_r_wqe_index, wcqe)); 11592 wqid_matched = true; 11593 break; 11594 } 11595 } 11596 /* Report warning log message if no match found */ 11597 if (wqid_matched != true) 11598 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 11599 "2580 Fast-path wqe consume event carries " 11600 "miss-matched qid: wcqe-qid=x%x\n", fcp_wqid); 11601 } 11602 11603 /** 11604 * lpfc_sli4_fp_handle_wcqe - Process fast-path work queue completion entry 11605 * @cq: Pointer to the completion queue. 11606 * @eqe: Pointer to fast-path completion queue entry. 11607 * 11608 * This routine process a fast-path work queue completion entry from fast-path 11609 * event queue for FCP command response completion. 
11610 **/
11611 static int
11612 lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
11613 			 struct lpfc_cqe *cqe)
11614 {
11615 	struct lpfc_wcqe_release wcqe;
11616 	bool workposted = false;
11617 
11618 	/* Copy the work queue CQE and convert endian order if needed */
11619 	lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
11620 
11621 	/* Check for the different types of WCQE and dispatch */
11622 	switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
11623 	case CQE_CODE_COMPL_WQE:
11624 		/* Process the WQ complete event */
11625 		phba->last_completion_time = jiffies;
11626 		lpfc_sli4_fp_handle_fcp_wcqe(phba,
11627 				(struct lpfc_wcqe_complete *)&wcqe);
11628 		break;
11629 	case CQE_CODE_RELEASE_WQE:
11630 		/* Process the WQ release event */
11631 		lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
11632 				(struct lpfc_wcqe_release *)&wcqe);
11633 		break;
11634 	case CQE_CODE_XRI_ABORTED:
11635 		/* Process the WQ XRI abort event */
11636 		phba->last_completion_time = jiffies;
11637 		workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
11638 				(struct sli4_wcqe_xri_aborted *)&wcqe);
11639 		break;
11640 	default:
11641 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11642 				"0144 Not a valid WCQE code: x%x\n",
11643 				bf_get(lpfc_wcqe_c_code, &wcqe));
11644 		break;
11645 	}
11646 	return workposted;
11647 }
11648 
11649 /**
11650  * lpfc_sli4_fp_handle_eqe - Process a fast-path event queue entry
11651  * @phba: Pointer to HBA context object.
11652  * @eqe: Pointer to fast-path event queue entry.
11653  *
11654  * This routine processes an event queue entry from the fast-path event queue.
11655  * It checks the MajorCode and MinorCode to determine whether this is a
11656  * completion event on a completion queue; if not, an error is logged and the
11657  * routine simply returns. Otherwise, it looks up the corresponding completion
11658  * queue, processes all the entries on the completion queue, rearms the
11659  * completion queue, and then returns.
11660 **/ 11661 static void 11662 lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, 11663 uint32_t fcp_cqidx) 11664 { 11665 struct lpfc_queue *cq; 11666 struct lpfc_cqe *cqe; 11667 bool workposted = false; 11668 uint16_t cqid; 11669 int ecount = 0; 11670 11671 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) { 11672 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11673 "0366 Not a valid fast-path completion " 11674 "event: majorcode=x%x, minorcode=x%x\n", 11675 bf_get_le32(lpfc_eqe_major_code, eqe), 11676 bf_get_le32(lpfc_eqe_minor_code, eqe)); 11677 return; 11678 } 11679 11680 if (unlikely(!phba->sli4_hba.fcp_cq)) { 11681 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 11682 "3146 Fast-path completion queues " 11683 "does not exist\n"); 11684 return; 11685 } 11686 cq = phba->sli4_hba.fcp_cq[fcp_cqidx]; 11687 if (unlikely(!cq)) { 11688 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) 11689 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11690 "0367 Fast-path completion queue " 11691 "(%d) does not exist\n", fcp_cqidx); 11692 return; 11693 } 11694 11695 /* Get the reference to the corresponding CQ */ 11696 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); 11697 if (unlikely(cqid != cq->queue_id)) { 11698 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11699 "0368 Miss-matched fast-path completion " 11700 "queue identifier: eqcqid=%d, fcpcqid=%d\n", 11701 cqid, cq->queue_id); 11702 return; 11703 } 11704 11705 /* Process all the entries to the CQ */ 11706 while ((cqe = lpfc_sli4_cq_get(cq))) { 11707 workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe); 11708 if (!(++ecount % cq->entry_repost)) 11709 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); 11710 } 11711 11712 /* Catch the no cq entry condition */ 11713 if (unlikely(ecount == 0)) 11714 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11715 "0369 No entry from fast-path completion " 11716 "queue fcpcqid=%d\n", cq->queue_id); 11717 11718 /* In any case, flash and re-arm the CQ */ 11719 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM); 11720 11721 /* wake up worker thread if there are works to be done */ 11722 if (workposted) 11723 lpfc_worker_wake_up(phba); 11724 } 11725 11726 static void 11727 lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq) 11728 { 11729 struct lpfc_eqe *eqe; 11730 11731 /* walk all the EQ entries and drop on the floor */ 11732 while ((eqe = lpfc_sli4_eq_get(eq))) 11733 ; 11734 11735 /* Clear and re-arm the EQ */ 11736 lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM); 11737 } 11738 11739 /** 11740 * lpfc_sli4_sp_intr_handler - Slow-path interrupt handler to SLI-4 device 11741 * @irq: Interrupt number. 11742 * @dev_id: The device context pointer. 11743 * 11744 * This function is directly called from the PCI layer as an interrupt 11745 * service routine when device with SLI-4 interface spec is enabled with 11746 * MSI-X multi-message interrupt mode and there are slow-path events in 11747 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ 11748 * interrupt mode, this function is called as part of the device-level 11749 * interrupt handler. When the PCI slot is in error recovery or the HBA is 11750 * undergoing initialization, the interrupt handler will not process the 11751 * interrupt. The link attention and ELS ring attention events are handled 11752 * by the worker thread. The interrupt handler signals the worker thread 11753 * and returns for these events. This function is called without any lock 11754 * held. It gets the hbalock to access and update SLI data structures. 
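 * (Descriptive note: while draining the event queue the handler releases
 * EQEs in batches of entry_repost with LPFC_QUEUE_NOARM and only re-arms the
 * EQ with LPFC_QUEUE_REARM after the loop completes, bounding the number of
 * doorbell writes taken per interrupt.)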
11755 * 11756 * This function returns IRQ_HANDLED when interrupt is handled else it 11757 * returns IRQ_NONE. 11758 **/ 11759 irqreturn_t 11760 lpfc_sli4_sp_intr_handler(int irq, void *dev_id) 11761 { 11762 struct lpfc_hba *phba; 11763 struct lpfc_queue *speq; 11764 struct lpfc_eqe *eqe; 11765 unsigned long iflag; 11766 int ecount = 0; 11767 11768 /* 11769 * Get the driver's phba structure from the dev_id 11770 */ 11771 phba = (struct lpfc_hba *)dev_id; 11772 11773 if (unlikely(!phba)) 11774 return IRQ_NONE; 11775 11776 /* Get to the EQ struct associated with this vector */ 11777 speq = phba->sli4_hba.sp_eq; 11778 if (unlikely(!speq)) 11779 return IRQ_NONE; 11780 11781 /* Check device state for handling interrupt */ 11782 if (unlikely(lpfc_intr_state_check(phba))) { 11783 /* Check again for link_state with lock held */ 11784 spin_lock_irqsave(&phba->hbalock, iflag); 11785 if (phba->link_state < LPFC_LINK_DOWN) 11786 /* Flush, clear interrupt, and rearm the EQ */ 11787 lpfc_sli4_eq_flush(phba, speq); 11788 spin_unlock_irqrestore(&phba->hbalock, iflag); 11789 return IRQ_NONE; 11790 } 11791 11792 /* 11793 * Process all the event on FCP slow-path EQ 11794 */ 11795 while ((eqe = lpfc_sli4_eq_get(speq))) { 11796 lpfc_sli4_sp_handle_eqe(phba, eqe); 11797 if (!(++ecount % speq->entry_repost)) 11798 lpfc_sli4_eq_release(speq, LPFC_QUEUE_NOARM); 11799 } 11800 11801 /* Always clear and re-arm the slow-path EQ */ 11802 lpfc_sli4_eq_release(speq, LPFC_QUEUE_REARM); 11803 11804 /* Catch the no cq entry condition */ 11805 if (unlikely(ecount == 0)) { 11806 if (phba->intr_type == MSIX) 11807 /* MSI-X treated interrupt served as no EQ share INT */ 11808 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 11809 "0357 MSI-X interrupt with no EQE\n"); 11810 else 11811 /* Non MSI-X treated on interrupt as EQ share INT */ 11812 return IRQ_NONE; 11813 } 11814 11815 return IRQ_HANDLED; 11816 } /* lpfc_sli4_sp_intr_handler */ 11817 11818 /** 11819 * lpfc_sli4_fp_intr_handler - Fast-path interrupt handler to SLI-4 device 11820 * @irq: Interrupt number. 11821 * @dev_id: The device context pointer. 11822 * 11823 * This function is directly called from the PCI layer as an interrupt 11824 * service routine when device with SLI-4 interface spec is enabled with 11825 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB 11826 * ring event in the HBA. However, when the device is enabled with either 11827 * MSI or Pin-IRQ interrupt mode, this function is called as part of the 11828 * device-level interrupt handler. When the PCI slot is in error recovery 11829 * or the HBA is undergoing initialization, the interrupt handler will not 11830 * process the interrupt. The SCSI FCP fast-path ring event are handled in 11831 * the intrrupt context. This function is called without any lock held. 11832 * It gets the hbalock to access and update SLI data structures. Note that, 11833 * the FCP EQ to FCP CQ are one-to-one map such that the FCP EQ index is 11834 * equal to that of FCP CQ index. 11835 * 11836 * This function returns IRQ_HANDLED when interrupt is handled else it 11837 * returns IRQ_NONE. 
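 *
 * Hypothetical registration sketch (illustration only; the vector and the
 * handler-name macro are assumptions about the MSI-X setup code):
 *
 *	rc = request_irq(fp_vector, lpfc_sli4_fp_intr_handler, 0,
 *			 LPFC_FP_DRIVER_HANDLER_NAME,
 *			 &phba->sli4_hba.fcp_eq_hdl[index]);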
11838 **/ 11839 irqreturn_t 11840 lpfc_sli4_fp_intr_handler(int irq, void *dev_id) 11841 { 11842 struct lpfc_hba *phba; 11843 struct lpfc_fcp_eq_hdl *fcp_eq_hdl; 11844 struct lpfc_queue *fpeq; 11845 struct lpfc_eqe *eqe; 11846 unsigned long iflag; 11847 int ecount = 0; 11848 uint32_t fcp_eqidx; 11849 11850 /* Get the driver's phba structure from the dev_id */ 11851 fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id; 11852 phba = fcp_eq_hdl->phba; 11853 fcp_eqidx = fcp_eq_hdl->idx; 11854 11855 if (unlikely(!phba)) 11856 return IRQ_NONE; 11857 if (unlikely(!phba->sli4_hba.fp_eq)) 11858 return IRQ_NONE; 11859 11860 /* Get to the EQ struct associated with this vector */ 11861 fpeq = phba->sli4_hba.fp_eq[fcp_eqidx]; 11862 if (unlikely(!fpeq)) 11863 return IRQ_NONE; 11864 11865 /* Check device state for handling interrupt */ 11866 if (unlikely(lpfc_intr_state_check(phba))) { 11867 /* Check again for link_state with lock held */ 11868 spin_lock_irqsave(&phba->hbalock, iflag); 11869 if (phba->link_state < LPFC_LINK_DOWN) 11870 /* Flush, clear interrupt, and rearm the EQ */ 11871 lpfc_sli4_eq_flush(phba, fpeq); 11872 spin_unlock_irqrestore(&phba->hbalock, iflag); 11873 return IRQ_NONE; 11874 } 11875 11876 /* 11877 * Process all the event on FCP fast-path EQ 11878 */ 11879 while ((eqe = lpfc_sli4_eq_get(fpeq))) { 11880 lpfc_sli4_fp_handle_eqe(phba, eqe, fcp_eqidx); 11881 if (!(++ecount % fpeq->entry_repost)) 11882 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM); 11883 } 11884 11885 /* Always clear and re-arm the fast-path EQ */ 11886 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM); 11887 11888 if (unlikely(ecount == 0)) { 11889 if (phba->intr_type == MSIX) 11890 /* MSI-X treated interrupt served as no EQ share INT */ 11891 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 11892 "0358 MSI-X interrupt with no EQE\n"); 11893 else 11894 /* Non MSI-X treated on interrupt as EQ share INT */ 11895 return IRQ_NONE; 11896 } 11897 11898 return IRQ_HANDLED; 11899 } /* lpfc_sli4_fp_intr_handler */ 11900 11901 /** 11902 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device 11903 * @irq: Interrupt number. 11904 * @dev_id: The device context pointer. 11905 * 11906 * This function is the device-level interrupt handler to device with SLI-4 11907 * interface spec, called from the PCI layer when either MSI or Pin-IRQ 11908 * interrupt mode is enabled and there is an event in the HBA which requires 11909 * driver attention. This function invokes the slow-path interrupt attention 11910 * handling function and fast-path interrupt attention handling function in 11911 * turn to process the relevant HBA attention events. This function is called 11912 * without any lock held. It gets the hbalock to access and update SLI data 11913 * structures. 11914 * 11915 * This function returns IRQ_HANDLED when interrupt is handled, else it 11916 * returns IRQ_NONE. 11917 **/ 11918 irqreturn_t 11919 lpfc_sli4_intr_handler(int irq, void *dev_id) 11920 { 11921 struct lpfc_hba *phba; 11922 irqreturn_t sp_irq_rc, fp_irq_rc; 11923 bool fp_handled = false; 11924 uint32_t fcp_eqidx; 11925 11926 /* Get the driver's phba structure from the dev_id */ 11927 phba = (struct lpfc_hba *)dev_id; 11928 11929 if (unlikely(!phba)) 11930 return IRQ_NONE; 11931 11932 /* 11933 * Invokes slow-path host attention interrupt handling as appropriate. 11934 */ 11935 sp_irq_rc = lpfc_sli4_sp_intr_handler(irq, dev_id); 11936 11937 /* 11938 * Invoke fast-path host attention interrupt handling as appropriate. 
11939 */ 11940 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { 11941 fp_irq_rc = lpfc_sli4_fp_intr_handler(irq, 11942 &phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]); 11943 if (fp_irq_rc == IRQ_HANDLED) 11944 fp_handled |= true; 11945 } 11946 11947 return (fp_handled == true) ? IRQ_HANDLED : sp_irq_rc; 11948 } /* lpfc_sli4_intr_handler */ 11949 11950 /** 11951 * lpfc_sli4_queue_free - free a queue structure and associated memory 11952 * @queue: The queue structure to free. 11953 * 11954 * This function frees a queue structure and the DMAable memory used for 11955 * the host resident queue. This function must be called after destroying the 11956 * queue on the HBA. 11957 **/ 11958 void 11959 lpfc_sli4_queue_free(struct lpfc_queue *queue) 11960 { 11961 struct lpfc_dmabuf *dmabuf; 11962 11963 if (!queue) 11964 return; 11965 11966 while (!list_empty(&queue->page_list)) { 11967 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf, 11968 list); 11969 dma_free_coherent(&queue->phba->pcidev->dev, SLI4_PAGE_SIZE, 11970 dmabuf->virt, dmabuf->phys); 11971 kfree(dmabuf); 11972 } 11973 kfree(queue); 11974 return; 11975 } 11976 11977 /** 11978 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure 11979 * @phba: The HBA that this queue is being created on. 11980 * @entry_size: The size of each queue entry for this queue. 11981 * @entry count: The number of entries that this queue will handle. 11982 * 11983 * This function allocates a queue structure and the DMAable memory used for 11984 * the host resident queue. This function must be called before creating the 11985 * queue on the HBA. 11986 **/ 11987 struct lpfc_queue * 11988 lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size, 11989 uint32_t entry_count) 11990 { 11991 struct lpfc_queue *queue; 11992 struct lpfc_dmabuf *dmabuf; 11993 int x, total_qe_count; 11994 void *dma_pointer; 11995 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 11996 11997 if (!phba->sli4_hba.pc_sli4_params.supported) 11998 hw_page_size = SLI4_PAGE_SIZE; 11999 12000 queue = kzalloc(sizeof(struct lpfc_queue) + 12001 (sizeof(union sli4_qe) * entry_count), GFP_KERNEL); 12002 if (!queue) 12003 return NULL; 12004 queue->page_count = (ALIGN(entry_size * entry_count, 12005 hw_page_size))/hw_page_size; 12006 INIT_LIST_HEAD(&queue->list); 12007 INIT_LIST_HEAD(&queue->page_list); 12008 INIT_LIST_HEAD(&queue->child_list); 12009 for (x = 0, total_qe_count = 0; x < queue->page_count; x++) { 12010 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 12011 if (!dmabuf) 12012 goto out_fail; 12013 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 12014 hw_page_size, &dmabuf->phys, 12015 GFP_KERNEL); 12016 if (!dmabuf->virt) { 12017 kfree(dmabuf); 12018 goto out_fail; 12019 } 12020 memset(dmabuf->virt, 0, hw_page_size); 12021 dmabuf->buffer_tag = x; 12022 list_add_tail(&dmabuf->list, &queue->page_list); 12023 /* initialize queue's entry array */ 12024 dma_pointer = dmabuf->virt; 12025 for (; total_qe_count < entry_count && 12026 dma_pointer < (hw_page_size + dmabuf->virt); 12027 total_qe_count++, dma_pointer += entry_size) { 12028 queue->qe[total_qe_count].address = dma_pointer; 12029 } 12030 } 12031 queue->entry_size = entry_size; 12032 queue->entry_count = entry_count; 12033 12034 /* 12035 * entry_repost is calculated based on the number of entries in the 12036 * queue. This works out except for RQs. If buffers are NOT initially 12037 * posted for every RQE, entry_repost should be adjusted accordingly. 
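	 * (Worked example, for illustration: a 256-entry queue gets an
	 * entry_repost of 256 >> 3 = 32; a 32-entry queue would compute 4,
	 * which the clamp below raises to LPFC_QUEUE_MIN_REPOST if that
	 * minimum is larger.)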
12038 */ 12039 queue->entry_repost = (entry_count >> 3); 12040 if (queue->entry_repost < LPFC_QUEUE_MIN_REPOST) 12041 queue->entry_repost = LPFC_QUEUE_MIN_REPOST; 12042 queue->phba = phba; 12043 12044 return queue; 12045 out_fail: 12046 lpfc_sli4_queue_free(queue); 12047 return NULL; 12048 } 12049 12050 /** 12051 * lpfc_modify_fcp_eq_delay - Modify Delay Multiplier on FCP EQs 12052 * @phba: HBA structure that indicates port to create a queue on. 12053 * @startq: The starting FCP EQ to modify 12054 * 12055 * This function sends an MODIFY_EQ_DELAY mailbox command to the HBA. 12056 * 12057 * The @phba struct is used to send mailbox command to HBA. The @startq 12058 * is used to get the starting FCP EQ to change. 12059 * This function is asynchronous and will wait for the mailbox 12060 * command to finish before continuing. 12061 * 12062 * On success this function will return a zero. If unable to allocate enough 12063 * memory this function will return -ENOMEM. If the queue create mailbox command 12064 * fails this function will return -ENXIO. 12065 **/ 12066 uint32_t 12067 lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint16_t startq) 12068 { 12069 struct lpfc_mbx_modify_eq_delay *eq_delay; 12070 LPFC_MBOXQ_t *mbox; 12071 struct lpfc_queue *eq; 12072 int cnt, rc, length, status = 0; 12073 uint32_t shdr_status, shdr_add_status; 12074 int fcp_eqidx; 12075 union lpfc_sli4_cfg_shdr *shdr; 12076 uint16_t dmult; 12077 12078 if (startq >= phba->cfg_fcp_eq_count) 12079 return 0; 12080 12081 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12082 if (!mbox) 12083 return -ENOMEM; 12084 length = (sizeof(struct lpfc_mbx_modify_eq_delay) - 12085 sizeof(struct lpfc_sli4_cfg_mhdr)); 12086 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 12087 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY, 12088 length, LPFC_SLI4_MBX_EMBED); 12089 eq_delay = &mbox->u.mqe.un.eq_delay; 12090 12091 /* Calculate delay multiper from maximum interrupt per second */ 12092 dmult = LPFC_DMULT_CONST/phba->cfg_fcp_imax - 1; 12093 12094 cnt = 0; 12095 for (fcp_eqidx = startq; fcp_eqidx < phba->cfg_fcp_eq_count; 12096 fcp_eqidx++) { 12097 eq = phba->sli4_hba.fp_eq[fcp_eqidx]; 12098 if (!eq) 12099 continue; 12100 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id; 12101 eq_delay->u.request.eq[cnt].phase = 0; 12102 eq_delay->u.request.eq[cnt].delay_multi = dmult; 12103 cnt++; 12104 if (cnt >= LPFC_MAX_EQ_DELAY) 12105 break; 12106 } 12107 eq_delay->u.request.num_eq = cnt; 12108 12109 mbox->vport = phba->pport; 12110 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 12111 mbox->context1 = NULL; 12112 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 12113 shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr; 12114 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 12115 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 12116 if (shdr_status || shdr_add_status || rc) { 12117 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12118 "2512 MODIFY_EQ_DELAY mailbox failed with " 12119 "status x%x add_status x%x, mbx status x%x\n", 12120 shdr_status, shdr_add_status, rc); 12121 status = -ENXIO; 12122 } 12123 mempool_free(mbox, phba->mbox_mem_pool); 12124 return status; 12125 } 12126 12127 /** 12128 * lpfc_eq_create - Create an Event Queue on the HBA 12129 * @phba: HBA structure that indicates port to create a queue on. 12130 * @eq: The queue structure to use to create the event queue. 12131 * @imax: The maximum interrupt per second limit. 
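 *        Illustrative note: the routine converts @imax to the EQ delay
 *        multiplier as LPFC_DMULT_CONST / @imax - 1, the same calculation
 *        lpfc_modify_fcp_eq_delay() uses above.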
12132 * 12133 * This function creates an event queue, as detailed in @eq, on a port, 12134 * described by @phba by sending an EQ_CREATE mailbox command to the HBA. 12135 * 12136 * The @phba struct is used to send mailbox command to HBA. The @eq struct 12137 * is used to get the entry count and entry size that are necessary to 12138 * determine the number of pages to allocate and use for this queue. This 12139 * function will send the EQ_CREATE mailbox command to the HBA to setup the 12140 * event queue. This function is asynchronous and will wait for the mailbox 12141 * command to finish before continuing. 12142 * 12143 * On success this function will return a zero. If unable to allocate enough 12144 * memory this function will return -ENOMEM. If the queue create mailbox command 12145 * fails this function will return -ENXIO. 12146 **/ 12147 uint32_t 12148 lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax) 12149 { 12150 struct lpfc_mbx_eq_create *eq_create; 12151 LPFC_MBOXQ_t *mbox; 12152 int rc, length, status = 0; 12153 struct lpfc_dmabuf *dmabuf; 12154 uint32_t shdr_status, shdr_add_status; 12155 union lpfc_sli4_cfg_shdr *shdr; 12156 uint16_t dmult; 12157 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 12158 12159 /* sanity check on queue memory */ 12160 if (!eq) 12161 return -ENODEV; 12162 if (!phba->sli4_hba.pc_sli4_params.supported) 12163 hw_page_size = SLI4_PAGE_SIZE; 12164 12165 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12166 if (!mbox) 12167 return -ENOMEM; 12168 length = (sizeof(struct lpfc_mbx_eq_create) - 12169 sizeof(struct lpfc_sli4_cfg_mhdr)); 12170 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 12171 LPFC_MBOX_OPCODE_EQ_CREATE, 12172 length, LPFC_SLI4_MBX_EMBED); 12173 eq_create = &mbox->u.mqe.un.eq_create; 12174 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request, 12175 eq->page_count); 12176 bf_set(lpfc_eq_context_size, &eq_create->u.request.context, 12177 LPFC_EQE_SIZE); 12178 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1); 12179 /* Calculate delay multiper from maximum interrupt per second */ 12180 dmult = LPFC_DMULT_CONST/imax - 1; 12181 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context, 12182 dmult); 12183 switch (eq->entry_count) { 12184 default: 12185 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12186 "0360 Unsupported EQ count. 
(%d)\n", 12187 eq->entry_count); 12188 if (eq->entry_count < 256) 12189 return -EINVAL; 12190 /* otherwise default to smallest count (drop through) */ 12191 case 256: 12192 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 12193 LPFC_EQ_CNT_256); 12194 break; 12195 case 512: 12196 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 12197 LPFC_EQ_CNT_512); 12198 break; 12199 case 1024: 12200 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 12201 LPFC_EQ_CNT_1024); 12202 break; 12203 case 2048: 12204 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 12205 LPFC_EQ_CNT_2048); 12206 break; 12207 case 4096: 12208 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 12209 LPFC_EQ_CNT_4096); 12210 break; 12211 } 12212 list_for_each_entry(dmabuf, &eq->page_list, list) { 12213 memset(dmabuf->virt, 0, hw_page_size); 12214 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 12215 putPaddrLow(dmabuf->phys); 12216 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 12217 putPaddrHigh(dmabuf->phys); 12218 } 12219 mbox->vport = phba->pport; 12220 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 12221 mbox->context1 = NULL; 12222 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 12223 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr; 12224 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 12225 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 12226 if (shdr_status || shdr_add_status || rc) { 12227 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12228 "2500 EQ_CREATE mailbox failed with " 12229 "status x%x add_status x%x, mbx status x%x\n", 12230 shdr_status, shdr_add_status, rc); 12231 status = -ENXIO; 12232 } 12233 eq->type = LPFC_EQ; 12234 eq->subtype = LPFC_NONE; 12235 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response); 12236 if (eq->queue_id == 0xFFFF) 12237 status = -ENXIO; 12238 eq->host_index = 0; 12239 eq->hba_index = 0; 12240 12241 mempool_free(mbox, phba->mbox_mem_pool); 12242 return status; 12243 } 12244 12245 /** 12246 * lpfc_cq_create - Create a Completion Queue on the HBA 12247 * @phba: HBA structure that indicates port to create a queue on. 12248 * @cq: The queue structure to use to create the completion queue. 12249 * @eq: The event queue to bind this completion queue to. 12250 * 12251 * This function creates a completion queue, as detailed in @wq, on a port, 12252 * described by @phba by sending a CQ_CREATE mailbox command to the HBA. 12253 * 12254 * The @phba struct is used to send mailbox command to HBA. The @cq struct 12255 * is used to get the entry count and entry size that are necessary to 12256 * determine the number of pages to allocate and use for this queue. The @eq 12257 * is used to indicate which event queue to bind this completion queue to. This 12258 * function will send the CQ_CREATE mailbox command to the HBA to setup the 12259 * completion queue. This function is asynchronous and will wait for the mailbox 12260 * command to finish before continuing. 12261 * 12262 * On success this function will return a zero. If unable to allocate enough 12263 * memory this function will return -ENOMEM. If the queue create mailbox command 12264 * fails this function will return -ENXIO. 
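 *
 * Minimal usage sketch (illustrative only; error handling is elided, the
 * 16-byte entry size and the LPFC_WCQ/LPFC_ELS constants are the values the
 * driver typically uses elsewhere and are assumptions here, and 1024 is
 * simply one of the entry counts the switch below accepts):
 *
 *	cq = lpfc_sli4_queue_alloc(phba, 16, 1024);
 *	if (!cq || lpfc_cq_create(phba, cq, eq, LPFC_WCQ, LPFC_ELS))
 *		goto fail;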
12265 **/ 12266 uint32_t 12267 lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, 12268 struct lpfc_queue *eq, uint32_t type, uint32_t subtype) 12269 { 12270 struct lpfc_mbx_cq_create *cq_create; 12271 struct lpfc_dmabuf *dmabuf; 12272 LPFC_MBOXQ_t *mbox; 12273 int rc, length, status = 0; 12274 uint32_t shdr_status, shdr_add_status; 12275 union lpfc_sli4_cfg_shdr *shdr; 12276 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 12277 12278 /* sanity check on queue memory */ 12279 if (!cq || !eq) 12280 return -ENODEV; 12281 if (!phba->sli4_hba.pc_sli4_params.supported) 12282 hw_page_size = SLI4_PAGE_SIZE; 12283 12284 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12285 if (!mbox) 12286 return -ENOMEM; 12287 length = (sizeof(struct lpfc_mbx_cq_create) - 12288 sizeof(struct lpfc_sli4_cfg_mhdr)); 12289 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 12290 LPFC_MBOX_OPCODE_CQ_CREATE, 12291 length, LPFC_SLI4_MBX_EMBED); 12292 cq_create = &mbox->u.mqe.un.cq_create; 12293 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr; 12294 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request, 12295 cq->page_count); 12296 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1); 12297 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1); 12298 bf_set(lpfc_mbox_hdr_version, &shdr->request, 12299 phba->sli4_hba.pc_sli4_params.cqv); 12300 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) { 12301 /* FW only supports 1. Should be PAGE_SIZE/SLI4_PAGE_SIZE */ 12302 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, 1); 12303 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context, 12304 eq->queue_id); 12305 } else { 12306 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, 12307 eq->queue_id); 12308 } 12309 switch (cq->entry_count) { 12310 default: 12311 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12312 "0361 Unsupported CQ count. (%d)\n", 12313 cq->entry_count); 12314 if (cq->entry_count < 256) { 12315 status = -EINVAL; 12316 goto out; 12317 } 12318 /* otherwise default to smallest count (drop through) */ 12319 case 256: 12320 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 12321 LPFC_CQ_CNT_256); 12322 break; 12323 case 512: 12324 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 12325 LPFC_CQ_CNT_512); 12326 break; 12327 case 1024: 12328 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 12329 LPFC_CQ_CNT_1024); 12330 break; 12331 } 12332 list_for_each_entry(dmabuf, &cq->page_list, list) { 12333 memset(dmabuf->virt, 0, hw_page_size); 12334 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 12335 putPaddrLow(dmabuf->phys); 12336 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 12337 putPaddrHigh(dmabuf->phys); 12338 } 12339 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 12340 12341 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 12342 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 12343 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 12344 if (shdr_status || shdr_add_status || rc) { 12345 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12346 "2501 CQ_CREATE mailbox failed with " 12347 "status x%x add_status x%x, mbx status x%x\n", 12348 shdr_status, shdr_add_status, rc); 12349 status = -ENXIO; 12350 goto out; 12351 } 12352 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response); 12353 if (cq->queue_id == 0xFFFF) { 12354 status = -ENXIO; 12355 goto out; 12356 } 12357 /* link the cq onto the parent eq child list */ 12358 list_add_tail(&cq->list, &eq->child_list); 12359 /* Set up completion queue's type and subtype */ 12360 cq->type = type; 12361 cq->subtype = subtype; 12362 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response); 12363 cq->assoc_qid = eq->queue_id; 12364 cq->host_index = 0; 12365 cq->hba_index = 0; 12366 12367 out: 12368 mempool_free(mbox, phba->mbox_mem_pool); 12369 return status; 12370 } 12371 12372 /** 12373 * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration 12374 * @phba: HBA structure that indicates port to create a queue on. 12375 * @mq: The queue structure to use to create the mailbox queue. 12376 * @mbox: An allocated pointer to type LPFC_MBOXQ_t 12377 * @cq: The completion queue to associate with this cq. 12378 * 12379 * This function provides failback (fb) functionality when the 12380 * mq_create_ext fails on older FW generations. It's purpose is identical 12381 * to mq_create_ext otherwise. 12382 * 12383 * This routine cannot fail as all attributes were previously accessed and 12384 * initialized in mq_create_ext. 12385 **/ 12386 static void 12387 lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq, 12388 LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq) 12389 { 12390 struct lpfc_mbx_mq_create *mq_create; 12391 struct lpfc_dmabuf *dmabuf; 12392 int length; 12393 12394 length = (sizeof(struct lpfc_mbx_mq_create) - 12395 sizeof(struct lpfc_sli4_cfg_mhdr)); 12396 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 12397 LPFC_MBOX_OPCODE_MQ_CREATE, 12398 length, LPFC_SLI4_MBX_EMBED); 12399 mq_create = &mbox->u.mqe.un.mq_create; 12400 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request, 12401 mq->page_count); 12402 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context, 12403 cq->queue_id); 12404 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1); 12405 switch (mq->entry_count) { 12406 case 16: 12407 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, 12408 LPFC_MQ_RING_SIZE_16); 12409 break; 12410 case 32: 12411 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, 12412 LPFC_MQ_RING_SIZE_32); 12413 break; 12414 case 64: 12415 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, 12416 LPFC_MQ_RING_SIZE_64); 12417 break; 12418 case 128: 12419 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, 12420 LPFC_MQ_RING_SIZE_128); 12421 break; 12422 } 12423 list_for_each_entry(dmabuf, &mq->page_list, list) { 12424 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 12425 putPaddrLow(dmabuf->phys); 12426 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 12427 putPaddrHigh(dmabuf->phys); 12428 } 12429 } 12430 12431 /** 12432 * lpfc_mq_create - Create a mailbox Queue on the HBA 12433 * @phba: HBA structure that indicates port to create a queue on. 12434 * @mq: The queue structure to use to create the mailbox queue. 
12435 * @cq: The completion queue to associate with this cq. 12436 * @subtype: The queue's subtype. 12437 * 12438 * This function creates a mailbox queue, as detailed in @mq, on a port, 12439 * described by @phba by sending a MQ_CREATE mailbox command to the HBA. 12440 * 12441 * The @phba struct is used to send mailbox command to HBA. The @cq struct 12442 * is used to get the entry count and entry size that are necessary to 12443 * determine the number of pages to allocate and use for this queue. This 12444 * function will send the MQ_CREATE mailbox command to the HBA to setup the 12445 * mailbox queue. This function is asynchronous and will wait for the mailbox 12446 * command to finish before continuing. 12447 * 12448 * On success this function will return a zero. If unable to allocate enough 12449 * memory this function will return -ENOMEM. If the queue create mailbox command 12450 * fails this function will return -ENXIO. 12451 **/ 12452 int32_t 12453 lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, 12454 struct lpfc_queue *cq, uint32_t subtype) 12455 { 12456 struct lpfc_mbx_mq_create *mq_create; 12457 struct lpfc_mbx_mq_create_ext *mq_create_ext; 12458 struct lpfc_dmabuf *dmabuf; 12459 LPFC_MBOXQ_t *mbox; 12460 int rc, length, status = 0; 12461 uint32_t shdr_status, shdr_add_status; 12462 union lpfc_sli4_cfg_shdr *shdr; 12463 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 12464 12465 /* sanity check on queue memory */ 12466 if (!mq || !cq) 12467 return -ENODEV; 12468 if (!phba->sli4_hba.pc_sli4_params.supported) 12469 hw_page_size = SLI4_PAGE_SIZE; 12470 12471 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12472 if (!mbox) 12473 return -ENOMEM; 12474 length = (sizeof(struct lpfc_mbx_mq_create_ext) - 12475 sizeof(struct lpfc_sli4_cfg_mhdr)); 12476 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 12477 LPFC_MBOX_OPCODE_MQ_CREATE_EXT, 12478 length, LPFC_SLI4_MBX_EMBED); 12479 12480 mq_create_ext = &mbox->u.mqe.un.mq_create_ext; 12481 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr; 12482 bf_set(lpfc_mbx_mq_create_ext_num_pages, 12483 &mq_create_ext->u.request, mq->page_count); 12484 bf_set(lpfc_mbx_mq_create_ext_async_evt_link, 12485 &mq_create_ext->u.request, 1); 12486 bf_set(lpfc_mbx_mq_create_ext_async_evt_fip, 12487 &mq_create_ext->u.request, 1); 12488 bf_set(lpfc_mbx_mq_create_ext_async_evt_group5, 12489 &mq_create_ext->u.request, 1); 12490 bf_set(lpfc_mbx_mq_create_ext_async_evt_fc, 12491 &mq_create_ext->u.request, 1); 12492 bf_set(lpfc_mbx_mq_create_ext_async_evt_sli, 12493 &mq_create_ext->u.request, 1); 12494 bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1); 12495 bf_set(lpfc_mbox_hdr_version, &shdr->request, 12496 phba->sli4_hba.pc_sli4_params.mqv); 12497 if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1) 12498 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request, 12499 cq->queue_id); 12500 else 12501 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context, 12502 cq->queue_id); 12503 switch (mq->entry_count) { 12504 default: 12505 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12506 "0362 Unsupported MQ count. 
(%d)\n", 12507 mq->entry_count); 12508 if (mq->entry_count < 16) { 12509 status = -EINVAL; 12510 goto out; 12511 } 12512 /* otherwise default to smallest count (drop through) */ 12513 case 16: 12514 bf_set(lpfc_mq_context_ring_size, 12515 &mq_create_ext->u.request.context, 12516 LPFC_MQ_RING_SIZE_16); 12517 break; 12518 case 32: 12519 bf_set(lpfc_mq_context_ring_size, 12520 &mq_create_ext->u.request.context, 12521 LPFC_MQ_RING_SIZE_32); 12522 break; 12523 case 64: 12524 bf_set(lpfc_mq_context_ring_size, 12525 &mq_create_ext->u.request.context, 12526 LPFC_MQ_RING_SIZE_64); 12527 break; 12528 case 128: 12529 bf_set(lpfc_mq_context_ring_size, 12530 &mq_create_ext->u.request.context, 12531 LPFC_MQ_RING_SIZE_128); 12532 break; 12533 } 12534 list_for_each_entry(dmabuf, &mq->page_list, list) { 12535 memset(dmabuf->virt, 0, hw_page_size); 12536 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo = 12537 putPaddrLow(dmabuf->phys); 12538 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi = 12539 putPaddrHigh(dmabuf->phys); 12540 } 12541 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 12542 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, 12543 &mq_create_ext->u.response); 12544 if (rc != MBX_SUCCESS) { 12545 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12546 "2795 MQ_CREATE_EXT failed with " 12547 "status x%x. Failback to MQ_CREATE.\n", 12548 rc); 12549 lpfc_mq_create_fb_init(phba, mq, mbox, cq); 12550 mq_create = &mbox->u.mqe.un.mq_create; 12551 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 12552 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr; 12553 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, 12554 &mq_create->u.response); 12555 } 12556 12557 /* The IOCTL status is embedded in the mailbox subheader. */ 12558 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 12559 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 12560 if (shdr_status || shdr_add_status || rc) { 12561 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12562 "2502 MQ_CREATE mailbox failed with " 12563 "status x%x add_status x%x, mbx status x%x\n", 12564 shdr_status, shdr_add_status, rc); 12565 status = -ENXIO; 12566 goto out; 12567 } 12568 if (mq->queue_id == 0xFFFF) { 12569 status = -ENXIO; 12570 goto out; 12571 } 12572 mq->type = LPFC_MQ; 12573 mq->assoc_qid = cq->queue_id; 12574 mq->subtype = subtype; 12575 mq->host_index = 0; 12576 mq->hba_index = 0; 12577 12578 /* link the mq onto the parent cq child list */ 12579 list_add_tail(&mq->list, &cq->child_list); 12580 out: 12581 mempool_free(mbox, phba->mbox_mem_pool); 12582 return status; 12583 } 12584 12585 /** 12586 * lpfc_wq_create - Create a Work Queue on the HBA 12587 * @phba: HBA structure that indicates port to create a queue on. 12588 * @wq: The queue structure to use to create the work queue. 12589 * @cq: The completion queue to bind this work queue to. 12590 * @subtype: The subtype of the work queue indicating its functionality. 12591 * 12592 * This function creates a work queue, as detailed in @wq, on a port, described 12593 * by @phba by sending a WQ_CREATE mailbox command to the HBA. 12594 * 12595 * The @phba struct is used to send mailbox command to HBA. The @wq struct 12596 * is used to get the entry count and entry size that are necessary to 12597 * determine the number of pages to allocate and use for this queue. The @cq 12598 * is used to indicate which completion queue to bind this work queue to. This 12599 * function will send the WQ_CREATE mailbox command to the HBA to setup the 12600 * work queue. 
This function is asynchronous and will wait for the mailbox 12601 * command to finish before continuing. 12602 * 12603 * On success this function will return a zero. If unable to allocate enough 12604 * memory this function will return -ENOMEM. If the queue create mailbox command 12605 * fails this function will return -ENXIO. 12606 **/ 12607 uint32_t 12608 lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, 12609 struct lpfc_queue *cq, uint32_t subtype) 12610 { 12611 struct lpfc_mbx_wq_create *wq_create; 12612 struct lpfc_dmabuf *dmabuf; 12613 LPFC_MBOXQ_t *mbox; 12614 int rc, length, status = 0; 12615 uint32_t shdr_status, shdr_add_status; 12616 union lpfc_sli4_cfg_shdr *shdr; 12617 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 12618 struct dma_address *page; 12619 12620 /* sanity check on queue memory */ 12621 if (!wq || !cq) 12622 return -ENODEV; 12623 if (!phba->sli4_hba.pc_sli4_params.supported) 12624 hw_page_size = SLI4_PAGE_SIZE; 12625 12626 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12627 if (!mbox) 12628 return -ENOMEM; 12629 length = (sizeof(struct lpfc_mbx_wq_create) - 12630 sizeof(struct lpfc_sli4_cfg_mhdr)); 12631 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 12632 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE, 12633 length, LPFC_SLI4_MBX_EMBED); 12634 wq_create = &mbox->u.mqe.un.wq_create; 12635 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr; 12636 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request, 12637 wq->page_count); 12638 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request, 12639 cq->queue_id); 12640 bf_set(lpfc_mbox_hdr_version, &shdr->request, 12641 phba->sli4_hba.pc_sli4_params.wqv); 12642 if (phba->sli4_hba.pc_sli4_params.wqv == LPFC_Q_CREATE_VERSION_1) { 12643 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1, 12644 wq->entry_count); 12645 switch (wq->entry_size) { 12646 default: 12647 case 64: 12648 bf_set(lpfc_mbx_wq_create_wqe_size, 12649 &wq_create->u.request_1, 12650 LPFC_WQ_WQE_SIZE_64); 12651 break; 12652 case 128: 12653 bf_set(lpfc_mbx_wq_create_wqe_size, 12654 &wq_create->u.request_1, 12655 LPFC_WQ_WQE_SIZE_128); 12656 break; 12657 } 12658 bf_set(lpfc_mbx_wq_create_page_size, &wq_create->u.request_1, 12659 (PAGE_SIZE/SLI4_PAGE_SIZE)); 12660 page = wq_create->u.request_1.page; 12661 } else { 12662 page = wq_create->u.request.page; 12663 } 12664 list_for_each_entry(dmabuf, &wq->page_list, list) { 12665 memset(dmabuf->virt, 0, hw_page_size); 12666 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys); 12667 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys); 12668 } 12669 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 12670 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 12671 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 12672 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 12673 if (shdr_status || shdr_add_status || rc) { 12674 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12675 "2503 WQ_CREATE mailbox failed with " 12676 "status x%x add_status x%x, mbx status x%x\n", 12677 shdr_status, shdr_add_status, rc); 12678 status = -ENXIO; 12679 goto out; 12680 } 12681 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, &wq_create->u.response); 12682 if (wq->queue_id == 0xFFFF) { 12683 status = -ENXIO; 12684 goto out; 12685 } 12686 wq->type = LPFC_WQ; 12687 wq->assoc_qid = cq->queue_id; 12688 wq->subtype = subtype; 12689 wq->host_index = 0; 12690 wq->hba_index = 0; 12691 wq->entry_repost = LPFC_RELEASE_NOTIFICATION_INTERVAL; 12692 12693 /* link the wq onto the parent cq child list */ 12694 list_add_tail(&wq->list, &cq->child_list); 12695 out: 12696 mempool_free(mbox, phba->mbox_mem_pool); 12697 return status; 12698 } 12699 12700 /** 12701 * lpfc_rq_adjust_repost - Adjust entry_repost for an RQ 12702 * @phba: HBA structure that indicates port to create a queue on. 12703 * @rq: The queue structure to use for the receive queue. 12704 * @qno: The associated HBQ number 12705 * 12706 * 12707 * For SLI4 we need to adjust the RQ repost value based on 12708 * the number of buffers that are initially posted to the RQ. 12709 */ 12710 void 12711 lpfc_rq_adjust_repost(struct lpfc_hba *phba, struct lpfc_queue *rq, int qno) 12712 { 12713 uint32_t cnt; 12714 12715 /* sanity check on queue memory */ 12716 if (!rq) 12717 return; 12718 cnt = lpfc_hbq_defs[qno]->entry_count; 12719 12720 /* Recalc repost for RQs based on buffers initially posted */ 12721 cnt = (cnt >> 3); 12722 if (cnt < LPFC_QUEUE_MIN_REPOST) 12723 cnt = LPFC_QUEUE_MIN_REPOST; 12724 12725 rq->entry_repost = cnt; 12726 } 12727 12728 /** 12729 * lpfc_rq_create - Create a Receive Queue on the HBA 12730 * @phba: HBA structure that indicates port to create a queue on. 12731 * @hrq: The queue structure to use to create the header receive queue. 12732 * @drq: The queue structure to use to create the data receive queue. 12733 * @cq: The completion queue to bind this work queue to. 12734 * 12735 * This function creates a receive buffer queue pair , as detailed in @hrq and 12736 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command 12737 * to the HBA. 12738 * 12739 * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq 12740 * struct is used to get the entry count that is necessary to determine the 12741 * number of pages to use for this queue. The @cq is used to indicate which 12742 * completion queue to bind received buffers that are posted to these queues to. 12743 * This function will send the RQ_CREATE mailbox command to the HBA to setup the 12744 * receive queue pair. This function is asynchronous and will wait for the 12745 * mailbox command to finish before continuing. 12746 * 12747 * On success this function will return a zero. If unable to allocate enough 12748 * memory this function will return -ENOMEM. If the queue create mailbox command 12749 * fails this function will return -ENXIO. 
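 *
 * Minimal usage sketch (illustrative only; error handling is elided, and
 * LPFC_USOL / LPFC_ELS_HBQ are the subtype and HBQ number the driver uses
 * elsewhere for the unsolicited ELS receive path, assumed here). Note that
 * @hrq and @drq must have been allocated with the same entry_count or this
 * routine returns -EINVAL:
 *
 *	if (lpfc_rq_create(phba, hrq, drq, els_cq, LPFC_USOL) == 0) {
 *		lpfc_rq_adjust_repost(phba, hrq, LPFC_ELS_HBQ);
 *		lpfc_rq_adjust_repost(phba, drq, LPFC_ELS_HBQ);
 *	}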
12750 **/ 12751 uint32_t 12752 lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, 12753 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype) 12754 { 12755 struct lpfc_mbx_rq_create *rq_create; 12756 struct lpfc_dmabuf *dmabuf; 12757 LPFC_MBOXQ_t *mbox; 12758 int rc, length, status = 0; 12759 uint32_t shdr_status, shdr_add_status; 12760 union lpfc_sli4_cfg_shdr *shdr; 12761 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 12762 12763 /* sanity check on queue memory */ 12764 if (!hrq || !drq || !cq) 12765 return -ENODEV; 12766 if (!phba->sli4_hba.pc_sli4_params.supported) 12767 hw_page_size = SLI4_PAGE_SIZE; 12768 12769 if (hrq->entry_count != drq->entry_count) 12770 return -EINVAL; 12771 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12772 if (!mbox) 12773 return -ENOMEM; 12774 length = (sizeof(struct lpfc_mbx_rq_create) - 12775 sizeof(struct lpfc_sli4_cfg_mhdr)); 12776 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 12777 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, 12778 length, LPFC_SLI4_MBX_EMBED); 12779 rq_create = &mbox->u.mqe.un.rq_create; 12780 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; 12781 bf_set(lpfc_mbox_hdr_version, &shdr->request, 12782 phba->sli4_hba.pc_sli4_params.rqv); 12783 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { 12784 bf_set(lpfc_rq_context_rqe_count_1, 12785 &rq_create->u.request.context, 12786 hrq->entry_count); 12787 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE; 12788 bf_set(lpfc_rq_context_rqe_size, 12789 &rq_create->u.request.context, 12790 LPFC_RQE_SIZE_8); 12791 bf_set(lpfc_rq_context_page_size, 12792 &rq_create->u.request.context, 12793 (PAGE_SIZE/SLI4_PAGE_SIZE)); 12794 } else { 12795 switch (hrq->entry_count) { 12796 default: 12797 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12798 "2535 Unsupported RQ count. (%d)\n", 12799 hrq->entry_count); 12800 if (hrq->entry_count < 512) { 12801 status = -EINVAL; 12802 goto out; 12803 } 12804 /* otherwise default to smallest count (drop through) */ 12805 case 512: 12806 bf_set(lpfc_rq_context_rqe_count, 12807 &rq_create->u.request.context, 12808 LPFC_RQ_RING_SIZE_512); 12809 break; 12810 case 1024: 12811 bf_set(lpfc_rq_context_rqe_count, 12812 &rq_create->u.request.context, 12813 LPFC_RQ_RING_SIZE_1024); 12814 break; 12815 case 2048: 12816 bf_set(lpfc_rq_context_rqe_count, 12817 &rq_create->u.request.context, 12818 LPFC_RQ_RING_SIZE_2048); 12819 break; 12820 case 4096: 12821 bf_set(lpfc_rq_context_rqe_count, 12822 &rq_create->u.request.context, 12823 LPFC_RQ_RING_SIZE_4096); 12824 break; 12825 } 12826 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, 12827 LPFC_HDR_BUF_SIZE); 12828 } 12829 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, 12830 cq->queue_id); 12831 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, 12832 hrq->page_count); 12833 list_for_each_entry(dmabuf, &hrq->page_list, list) { 12834 memset(dmabuf->virt, 0, hw_page_size); 12835 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 12836 putPaddrLow(dmabuf->phys); 12837 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 12838 putPaddrHigh(dmabuf->phys); 12839 } 12840 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 12841 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 12842 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 12843 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 12844 if (shdr_status || shdr_add_status || rc) { 12845 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12846 "2504 RQ_CREATE mailbox failed with " 12847 "status x%x add_status x%x, mbx status x%x\n", 12848 shdr_status, shdr_add_status, rc); 12849 status = -ENXIO; 12850 goto out; 12851 } 12852 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); 12853 if (hrq->queue_id == 0xFFFF) { 12854 status = -ENXIO; 12855 goto out; 12856 } 12857 hrq->type = LPFC_HRQ; 12858 hrq->assoc_qid = cq->queue_id; 12859 hrq->subtype = subtype; 12860 hrq->host_index = 0; 12861 hrq->hba_index = 0; 12862 12863 /* now create the data queue */ 12864 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 12865 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, 12866 length, LPFC_SLI4_MBX_EMBED); 12867 bf_set(lpfc_mbox_hdr_version, &shdr->request, 12868 phba->sli4_hba.pc_sli4_params.rqv); 12869 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { 12870 bf_set(lpfc_rq_context_rqe_count_1, 12871 &rq_create->u.request.context, hrq->entry_count); 12872 rq_create->u.request.context.buffer_size = LPFC_DATA_BUF_SIZE; 12873 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context, 12874 LPFC_RQE_SIZE_8); 12875 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context, 12876 (PAGE_SIZE/SLI4_PAGE_SIZE)); 12877 } else { 12878 switch (drq->entry_count) { 12879 default: 12880 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12881 "2536 Unsupported RQ count. (%d)\n", 12882 drq->entry_count); 12883 if (drq->entry_count < 512) { 12884 status = -EINVAL; 12885 goto out; 12886 } 12887 /* otherwise default to smallest count (drop through) */ 12888 case 512: 12889 bf_set(lpfc_rq_context_rqe_count, 12890 &rq_create->u.request.context, 12891 LPFC_RQ_RING_SIZE_512); 12892 break; 12893 case 1024: 12894 bf_set(lpfc_rq_context_rqe_count, 12895 &rq_create->u.request.context, 12896 LPFC_RQ_RING_SIZE_1024); 12897 break; 12898 case 2048: 12899 bf_set(lpfc_rq_context_rqe_count, 12900 &rq_create->u.request.context, 12901 LPFC_RQ_RING_SIZE_2048); 12902 break; 12903 case 4096: 12904 bf_set(lpfc_rq_context_rqe_count, 12905 &rq_create->u.request.context, 12906 LPFC_RQ_RING_SIZE_4096); 12907 break; 12908 } 12909 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, 12910 LPFC_DATA_BUF_SIZE); 12911 } 12912 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, 12913 cq->queue_id); 12914 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, 12915 drq->page_count); 12916 list_for_each_entry(dmabuf, &drq->page_list, list) { 12917 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 12918 putPaddrLow(dmabuf->phys); 12919 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 12920 putPaddrHigh(dmabuf->phys); 12921 } 12922 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 12923 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 12924 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; 12925 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 12926 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 12927 if (shdr_status || shdr_add_status || rc) { 12928 status = -ENXIO; 12929 goto out; 12930 } 12931 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); 12932 if (drq->queue_id == 0xFFFF) { 12933 status = -ENXIO; 12934 goto out; 12935 } 12936 drq->type = LPFC_DRQ; 12937 drq->assoc_qid = cq->queue_id; 12938 drq->subtype = subtype; 12939 drq->host_index = 0; 12940 drq->hba_index = 0; 12941 12942 /* link the header and data RQs onto the parent cq child list */ 12943 list_add_tail(&hrq->list, &cq->child_list); 12944 list_add_tail(&drq->list, &cq->child_list); 12945 12946 out: 12947 mempool_free(mbox, phba->mbox_mem_pool); 12948 return status; 12949 } 12950 12951 /** 12952 * lpfc_eq_destroy - Destroy an event Queue on the HBA 12953 * @eq: The queue structure associated with the queue to destroy. 12954 * 12955 * This function destroys a queue, as detailed in @eq by sending an mailbox 12956 * command, specific to the type of queue, to the HBA. 12957 * 12958 * The @eq struct is used to get the queue ID of the queue to destroy. 12959 * 12960 * On success this function will return a zero. If the queue destroy mailbox 12961 * command fails this function will return -ENXIO. 12962 **/ 12963 uint32_t 12964 lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq) 12965 { 12966 LPFC_MBOXQ_t *mbox; 12967 int rc, length, status = 0; 12968 uint32_t shdr_status, shdr_add_status; 12969 union lpfc_sli4_cfg_shdr *shdr; 12970 12971 /* sanity check on queue memory */ 12972 if (!eq) 12973 return -ENODEV; 12974 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL); 12975 if (!mbox) 12976 return -ENOMEM; 12977 length = (sizeof(struct lpfc_mbx_eq_destroy) - 12978 sizeof(struct lpfc_sli4_cfg_mhdr)); 12979 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 12980 LPFC_MBOX_OPCODE_EQ_DESTROY, 12981 length, LPFC_SLI4_MBX_EMBED); 12982 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request, 12983 eq->queue_id); 12984 mbox->vport = eq->phba->pport; 12985 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 12986 12987 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL); 12988 /* The IOCTL status is embedded in the mailbox subheader. */ 12989 shdr = (union lpfc_sli4_cfg_shdr *) 12990 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr; 12991 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 12992 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 12993 if (shdr_status || shdr_add_status || rc) { 12994 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12995 "2505 EQ_DESTROY mailbox failed with " 12996 "status x%x add_status x%x, mbx status x%x\n", 12997 shdr_status, shdr_add_status, rc); 12998 status = -ENXIO; 12999 } 13000 13001 /* Remove eq from any list */ 13002 list_del_init(&eq->list); 13003 mempool_free(mbox, eq->phba->mbox_mem_pool); 13004 return status; 13005 } 13006 13007 /** 13008 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA 13009 * @cq: The queue structure associated with the queue to destroy. 13010 * 13011 * This function destroys a queue, as detailed in @cq by sending an mailbox 13012 * command, specific to the type of queue, to the HBA. 13013 * 13014 * The @cq struct is used to get the queue ID of the queue to destroy. 13015 * 13016 * On success this function will return a zero. 
If the queue destroy mailbox 13017 * command fails this function will return -ENXIO. 13018 **/ 13019 uint32_t 13020 lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq) 13021 { 13022 LPFC_MBOXQ_t *mbox; 13023 int rc, length, status = 0; 13024 uint32_t shdr_status, shdr_add_status; 13025 union lpfc_sli4_cfg_shdr *shdr; 13026 13027 /* sanity check on queue memory */ 13028 if (!cq) 13029 return -ENODEV; 13030 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL); 13031 if (!mbox) 13032 return -ENOMEM; 13033 length = (sizeof(struct lpfc_mbx_cq_destroy) - 13034 sizeof(struct lpfc_sli4_cfg_mhdr)); 13035 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 13036 LPFC_MBOX_OPCODE_CQ_DESTROY, 13037 length, LPFC_SLI4_MBX_EMBED); 13038 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request, 13039 cq->queue_id); 13040 mbox->vport = cq->phba->pport; 13041 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 13042 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL); 13043 /* The IOCTL status is embedded in the mailbox subheader. */ 13044 shdr = (union lpfc_sli4_cfg_shdr *) 13045 &mbox->u.mqe.un.wq_create.header.cfg_shdr; 13046 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 13047 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 13048 if (shdr_status || shdr_add_status || rc) { 13049 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13050 "2506 CQ_DESTROY mailbox failed with " 13051 "status x%x add_status x%x, mbx status x%x\n", 13052 shdr_status, shdr_add_status, rc); 13053 status = -ENXIO; 13054 } 13055 /* Remove cq from any list */ 13056 list_del_init(&cq->list); 13057 mempool_free(mbox, cq->phba->mbox_mem_pool); 13058 return status; 13059 } 13060 13061 /** 13062 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA 13063 * @qm: The queue structure associated with the queue to destroy. 13064 * 13065 * This function destroys a queue, as detailed in @mq by sending an mailbox 13066 * command, specific to the type of queue, to the HBA. 13067 * 13068 * The @mq struct is used to get the queue ID of the queue to destroy. 13069 * 13070 * On success this function will return a zero. If the queue destroy mailbox 13071 * command fails this function will return -ENXIO. 13072 **/ 13073 uint32_t 13074 lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq) 13075 { 13076 LPFC_MBOXQ_t *mbox; 13077 int rc, length, status = 0; 13078 uint32_t shdr_status, shdr_add_status; 13079 union lpfc_sli4_cfg_shdr *shdr; 13080 13081 /* sanity check on queue memory */ 13082 if (!mq) 13083 return -ENODEV; 13084 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL); 13085 if (!mbox) 13086 return -ENOMEM; 13087 length = (sizeof(struct lpfc_mbx_mq_destroy) - 13088 sizeof(struct lpfc_sli4_cfg_mhdr)); 13089 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 13090 LPFC_MBOX_OPCODE_MQ_DESTROY, 13091 length, LPFC_SLI4_MBX_EMBED); 13092 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request, 13093 mq->queue_id); 13094 mbox->vport = mq->phba->pport; 13095 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 13096 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL); 13097 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 13098 shdr = (union lpfc_sli4_cfg_shdr *) 13099 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr; 13100 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 13101 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 13102 if (shdr_status || shdr_add_status || rc) { 13103 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13104 "2507 MQ_DESTROY mailbox failed with " 13105 "status x%x add_status x%x, mbx status x%x\n", 13106 shdr_status, shdr_add_status, rc); 13107 status = -ENXIO; 13108 } 13109 /* Remove mq from any list */ 13110 list_del_init(&mq->list); 13111 mempool_free(mbox, mq->phba->mbox_mem_pool); 13112 return status; 13113 } 13114 13115 /** 13116 * lpfc_wq_destroy - Destroy a Work Queue on the HBA 13117 * @wq: The queue structure associated with the queue to destroy. 13118 * 13119 * This function destroys a queue, as detailed in @wq by sending an mailbox 13120 * command, specific to the type of queue, to the HBA. 13121 * 13122 * The @wq struct is used to get the queue ID of the queue to destroy. 13123 * 13124 * On success this function will return a zero. If the queue destroy mailbox 13125 * command fails this function will return -ENXIO. 13126 **/ 13127 uint32_t 13128 lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq) 13129 { 13130 LPFC_MBOXQ_t *mbox; 13131 int rc, length, status = 0; 13132 uint32_t shdr_status, shdr_add_status; 13133 union lpfc_sli4_cfg_shdr *shdr; 13134 13135 /* sanity check on queue memory */ 13136 if (!wq) 13137 return -ENODEV; 13138 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL); 13139 if (!mbox) 13140 return -ENOMEM; 13141 length = (sizeof(struct lpfc_mbx_wq_destroy) - 13142 sizeof(struct lpfc_sli4_cfg_mhdr)); 13143 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 13144 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY, 13145 length, LPFC_SLI4_MBX_EMBED); 13146 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request, 13147 wq->queue_id); 13148 mbox->vport = wq->phba->pport; 13149 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 13150 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL); 13151 shdr = (union lpfc_sli4_cfg_shdr *) 13152 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr; 13153 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 13154 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 13155 if (shdr_status || shdr_add_status || rc) { 13156 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13157 "2508 WQ_DESTROY mailbox failed with " 13158 "status x%x add_status x%x, mbx status x%x\n", 13159 shdr_status, shdr_add_status, rc); 13160 status = -ENXIO; 13161 } 13162 /* Remove wq from any list */ 13163 list_del_init(&wq->list); 13164 mempool_free(mbox, wq->phba->mbox_mem_pool); 13165 return status; 13166 } 13167 13168 /** 13169 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA 13170 * @rq: The queue structure associated with the queue to destroy. 13171 * 13172 * This function destroys a queue, as detailed in @rq by sending an mailbox 13173 * command, specific to the type of queue, to the HBA. 13174 * 13175 * The @rq struct is used to get the queue ID of the queue to destroy. 13176 * 13177 * On success this function will return a zero. If the queue destroy mailbox 13178 * command fails this function will return -ENXIO. 
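 *
 * Illustrative teardown sketch (a driver convention, not enforced by this
 * routine; the queue names are placeholders): queues are destroyed
 * child-first, mirroring the parent/child lists built by the create
 * routines above, and only freed with lpfc_sli4_queue_free() once the
 * HBA-side queue is gone:
 *
 *	lpfc_rq_destroy(phba, hrq, drq);
 *	lpfc_cq_destroy(phba, els_cq);
 *	lpfc_eq_destroy(phba, eq);
 *	lpfc_sli4_queue_free(hrq);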
13179 **/ 13180 uint32_t 13181 lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq, 13182 struct lpfc_queue *drq) 13183 { 13184 LPFC_MBOXQ_t *mbox; 13185 int rc, length, status = 0; 13186 uint32_t shdr_status, shdr_add_status; 13187 union lpfc_sli4_cfg_shdr *shdr; 13188 13189 /* sanity check on queue memory */ 13190 if (!hrq || !drq) 13191 return -ENODEV; 13192 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL); 13193 if (!mbox) 13194 return -ENOMEM; 13195 length = (sizeof(struct lpfc_mbx_rq_destroy) - 13196 sizeof(struct lpfc_sli4_cfg_mhdr)); 13197 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 13198 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY, 13199 length, LPFC_SLI4_MBX_EMBED); 13200 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request, 13201 hrq->queue_id); 13202 mbox->vport = hrq->phba->pport; 13203 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 13204 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL); 13205 /* The IOCTL status is embedded in the mailbox subheader. */ 13206 shdr = (union lpfc_sli4_cfg_shdr *) 13207 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr; 13208 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 13209 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 13210 if (shdr_status || shdr_add_status || rc) { 13211 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13212 "2509 RQ_DESTROY mailbox failed with " 13213 "status x%x add_status x%x, mbx status x%x\n", 13214 shdr_status, shdr_add_status, rc); 13215 if (rc != MBX_TIMEOUT) 13216 mempool_free(mbox, hrq->phba->mbox_mem_pool); 13217 return -ENXIO; 13218 } 13219 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request, 13220 drq->queue_id); 13221 rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL); 13222 shdr = (union lpfc_sli4_cfg_shdr *) 13223 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr; 13224 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 13225 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 13226 if (shdr_status || shdr_add_status || rc) { 13227 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13228 "2510 RQ_DESTROY mailbox failed with " 13229 "status x%x add_status x%x, mbx status x%x\n", 13230 shdr_status, shdr_add_status, rc); 13231 status = -ENXIO; 13232 } 13233 list_del_init(&hrq->list); 13234 list_del_init(&drq->list); 13235 mempool_free(mbox, hrq->phba->mbox_mem_pool); 13236 return status; 13237 } 13238 13239 /** 13240 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA 13241 * @phba: The virtual port for which this call being executed. 13242 * @pdma_phys_addr0: Physical address of the 1st SGL page. 13243 * @pdma_phys_addr1: Physical address of the 2nd SGL page. 13244 * @xritag: the xritag that ties this io to the SGL pages. 13245 * 13246 * This routine will post the sgl pages for the IO that has the xritag 13247 * that is in the iocbq structure. The xritag is assigned during iocbq 13248 * creation and persists for as long as the driver is loaded. 13249 * if the caller has fewer than 256 scatter gather segments to map then 13250 * pdma_phys_addr1 should be 0. 13251 * If the caller needs to map more than 256 scatter gather segment then 13252 * pdma_phys_addr1 should be a valid physical address. 13253 * physical address for SGLs must be 64 byte aligned. 13254 * If you are going to map 2 SGL's then the first one must have 256 entries 13255 * the second sgl can have between 1 and 256 entries. 
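 *
 * Minimal usage sketch (illustrative only; sgl_phys stands for the
 * 64 byte aligned DMA address of the caller's single SGL page, so the
 * second page address is passed as 0):
 *
 *	xritag = lpfc_sli4_next_xritag(phba);
 *	if (xritag != NO_XRI)
 *		rc = lpfc_sli4_post_sgl(phba, sgl_phys, 0, xritag);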
13256 * 13257 * Return codes: 13258 * 0 - Success 13259 * -ENXIO, -ENOMEM - Failure 13260 **/ 13261 int 13262 lpfc_sli4_post_sgl(struct lpfc_hba *phba, 13263 dma_addr_t pdma_phys_addr0, 13264 dma_addr_t pdma_phys_addr1, 13265 uint16_t xritag) 13266 { 13267 struct lpfc_mbx_post_sgl_pages *post_sgl_pages; 13268 LPFC_MBOXQ_t *mbox; 13269 int rc; 13270 uint32_t shdr_status, shdr_add_status; 13271 uint32_t mbox_tmo; 13272 union lpfc_sli4_cfg_shdr *shdr; 13273 13274 if (xritag == NO_XRI) { 13275 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13276 "0364 Invalid param:\n"); 13277 return -EINVAL; 13278 } 13279 13280 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 13281 if (!mbox) 13282 return -ENOMEM; 13283 13284 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 13285 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, 13286 sizeof(struct lpfc_mbx_post_sgl_pages) - 13287 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED); 13288 13289 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *) 13290 &mbox->u.mqe.un.post_sgl_pages; 13291 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag); 13292 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1); 13293 13294 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo = 13295 cpu_to_le32(putPaddrLow(pdma_phys_addr0)); 13296 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi = 13297 cpu_to_le32(putPaddrHigh(pdma_phys_addr0)); 13298 13299 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo = 13300 cpu_to_le32(putPaddrLow(pdma_phys_addr1)); 13301 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi = 13302 cpu_to_le32(putPaddrHigh(pdma_phys_addr1)); 13303 if (!phba->sli4_hba.intr_enable) 13304 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 13305 else { 13306 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 13307 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 13308 } 13309 /* The IOCTL status is embedded in the mailbox subheader. */ 13310 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr; 13311 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 13312 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 13313 if (rc != MBX_TIMEOUT) 13314 mempool_free(mbox, phba->mbox_mem_pool); 13315 if (shdr_status || shdr_add_status || rc) { 13316 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13317 "2511 POST_SGL mailbox failed with " 13318 "status x%x add_status x%x, mbx status x%x\n", 13319 shdr_status, shdr_add_status, rc); 13320 rc = -ENXIO; 13321 } 13322 return rc; 13323 } 13324 13325 /** 13326 * lpfc_sli4_alloc_xri - Get an available xri in the device's range 13327 * @phba: pointer to lpfc hba data structure. 13328 * 13329 * This routine is invoked to allocate the next available xri from the 13330 * driver's xri bitmask, consistent with the SLI-4 interface spec. The 13331 * xri is marked used in the bitmask, under the hbalock, until it is 13332 * released again with lpfc_sli4_free_xri(). 13333 * 13334 * Returns 13335 * An available xri in the range 0 <= xri < max_xri if successful 13336 * NO_XRI if no xris are available. 13337 **/ 13338 uint16_t 13339 lpfc_sli4_alloc_xri(struct lpfc_hba *phba) 13340 { 13341 unsigned long xri; 13342 13343 /* 13344 * Fetch the next logical xri. Because this index is logical, 13345 * the driver starts at 0 each time.
13346 */ 13347 spin_lock_irq(&phba->hbalock); 13348 xri = find_next_zero_bit(phba->sli4_hba.xri_bmask, 13349 phba->sli4_hba.max_cfg_param.max_xri, 0); 13350 if (xri >= phba->sli4_hba.max_cfg_param.max_xri) { 13351 spin_unlock_irq(&phba->hbalock); 13352 return NO_XRI; 13353 } else { 13354 set_bit(xri, phba->sli4_hba.xri_bmask); 13355 phba->sli4_hba.max_cfg_param.xri_used++; 13356 } 13357 spin_unlock_irq(&phba->hbalock); 13358 return xri; 13359 } 13360 13361 /** 13362 * __lpfc_sli4_free_xri - Release an xri for reuse (lock held version). 13363 * @phba: pointer to lpfc hba data structure. 13364 * 13365 * This routine is invoked to release an xri to the pool of available 13366 * xris maintained by the driver. The caller is expected to hold the hbalock. 13367 **/ 13368 void 13369 __lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri) 13370 { 13371 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) { 13372 phba->sli4_hba.max_cfg_param.xri_used--; 13373 } 13374 } 13375 13376 /** 13377 * lpfc_sli4_free_xri - Release an xri for reuse. 13378 * @phba: pointer to lpfc hba data structure. 13379 * 13380 * This routine is invoked to release an xri to the pool of available 13381 * xris maintained by the driver. 13382 **/ 13383 void 13384 lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri) 13385 { 13386 spin_lock_irq(&phba->hbalock); 13387 __lpfc_sli4_free_xri(phba, xri); 13388 spin_unlock_irq(&phba->hbalock); 13389 } 13390 13391 /** 13392 * lpfc_sli4_next_xritag - Get an xritag for the io 13393 * @phba: Pointer to HBA context object. 13394 * 13395 * This function allocates an xritag for the iocb. If no unused xritag 13396 * is available it will return NO_XRI (0xFFFF); otherwise it returns the 13397 * newly allocated xritag. 13398 * NO_XRI is not a valid xritag. 13399 * The caller is not required to hold any lock. 13400 **/ 13401 uint16_t 13402 lpfc_sli4_next_xritag(struct lpfc_hba *phba) 13403 { 13404 uint16_t xri_index; 13405 13406 xri_index = lpfc_sli4_alloc_xri(phba); 13407 if (xri_index == NO_XRI) 13408 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 13409 "2004 Failed to allocate XRI. Last XRITAG is %d" 13410 " Max XRI is %d, Used XRI is %d\n", 13411 xri_index, 13412 phba->sli4_hba.max_cfg_param.max_xri, 13413 phba->sli4_hba.max_cfg_param.xri_used); 13414 return xri_index; 13415 } 13416 13417 /** 13418 * lpfc_sli4_post_els_sgl_list - post a block of ELS sgls to the port. 13419 * @phba: pointer to lpfc hba data structure. 13420 * @post_sgl_list: pointer to els sgl entry list. 13421 * @post_cnt: number of els sgl entries on the list. 13422 * 13423 * This routine is invoked to post a block of driver's sgl pages to the 13424 * HBA using non-embedded mailbox command. No Lock is held. This routine 13425 * is only called when the driver is loading and after all IO has been 13426 * stopped.
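 *
 * Illustrative sizing note: the whole request must fit in one non-embedded
 * mailbox page, i.e. els_xri_cnt * sizeof(struct sgl_page_pairs) plus the
 * config header and one trailing word may not exceed SLI4_PAGE_SIZE (see
 * the reqlen check below); assuming 16-byte page pairs and a 4KB page that
 * works out to roughly 250 ELS sgls per command (a rough figure, not a
 * driver constant).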
13427 **/ 13428 static int 13429 lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba, 13430 struct list_head *post_sgl_list, 13431 int post_cnt) 13432 { 13433 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 13434 struct lpfc_mbx_post_uembed_sgl_page1 *sgl; 13435 struct sgl_page_pairs *sgl_pg_pairs; 13436 void *viraddr; 13437 LPFC_MBOXQ_t *mbox; 13438 uint32_t reqlen, alloclen, pg_pairs; 13439 uint32_t mbox_tmo; 13440 uint16_t xritag_start = 0; 13441 int rc = 0; 13442 uint32_t shdr_status, shdr_add_status; 13443 union lpfc_sli4_cfg_shdr *shdr; 13444 13445 reqlen = phba->sli4_hba.els_xri_cnt * sizeof(struct sgl_page_pairs) + 13446 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); 13447 if (reqlen > SLI4_PAGE_SIZE) { 13448 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 13449 "2559 Block sgl registration required DMA " 13450 "size (%d) great than a page\n", reqlen); 13451 return -ENOMEM; 13452 } 13453 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 13454 if (!mbox) 13455 return -ENOMEM; 13456 13457 /* Allocate DMA memory and set up the non-embedded mailbox command */ 13458 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 13459 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen, 13460 LPFC_SLI4_MBX_NEMBED); 13461 13462 if (alloclen < reqlen) { 13463 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13464 "0285 Allocated DMA memory size (%d) is " 13465 "less than the requested DMA memory " 13466 "size (%d)\n", alloclen, reqlen); 13467 lpfc_sli4_mbox_cmd_free(phba, mbox); 13468 return -ENOMEM; 13469 } 13470 /* Set up the SGL pages in the non-embedded DMA pages */ 13471 viraddr = mbox->sge_array->addr[0]; 13472 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; 13473 sgl_pg_pairs = &sgl->sgl_pg_pairs; 13474 13475 pg_pairs = 0; 13476 list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) { 13477 /* Set up the sge entry */ 13478 sgl_pg_pairs->sgl_pg0_addr_lo = 13479 cpu_to_le32(putPaddrLow(sglq_entry->phys)); 13480 sgl_pg_pairs->sgl_pg0_addr_hi = 13481 cpu_to_le32(putPaddrHigh(sglq_entry->phys)); 13482 sgl_pg_pairs->sgl_pg1_addr_lo = 13483 cpu_to_le32(putPaddrLow(0)); 13484 sgl_pg_pairs->sgl_pg1_addr_hi = 13485 cpu_to_le32(putPaddrHigh(0)); 13486 13487 /* Keep the first xritag on the list */ 13488 if (pg_pairs == 0) 13489 xritag_start = sglq_entry->sli4_xritag; 13490 sgl_pg_pairs++; 13491 pg_pairs++; 13492 } 13493 13494 /* Complete initialization and perform endian conversion. 
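	 * Only word0 is byte-swapped here: the xri and xri-count bit fields
	 * set just below appear to live in word0, while the page addresses
	 * were already stored in little-endian form via cpu_to_le32() above.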
*/ 13495 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); 13496 bf_set(lpfc_post_sgl_pages_xricnt, sgl, phba->sli4_hba.els_xri_cnt); 13497 sgl->word0 = cpu_to_le32(sgl->word0); 13498 if (!phba->sli4_hba.intr_enable) 13499 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 13500 else { 13501 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 13502 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 13503 } 13504 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; 13505 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 13506 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 13507 if (rc != MBX_TIMEOUT) 13508 lpfc_sli4_mbox_cmd_free(phba, mbox); 13509 if (shdr_status || shdr_add_status || rc) { 13510 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13511 "2513 POST_SGL_BLOCK mailbox command failed " 13512 "status x%x add_status x%x mbx status x%x\n", 13513 shdr_status, shdr_add_status, rc); 13514 rc = -ENXIO; 13515 } 13516 return rc; 13517 } 13518 13519 /** 13520 * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware 13521 * @phba: pointer to lpfc hba data structure. 13522 * @sblist: pointer to scsi buffer list. 13523 * @count: number of scsi buffers on the list. 13524 * 13525 * This routine is invoked to post a block of @count scsi sgl pages from a 13526 * SCSI buffer list @sblist to the HBA using non-embedded mailbox command. 13527 * No Lock is held. 13528 * 13529 **/ 13530 int 13531 lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, 13532 struct list_head *sblist, 13533 int count) 13534 { 13535 struct lpfc_scsi_buf *psb; 13536 struct lpfc_mbx_post_uembed_sgl_page1 *sgl; 13537 struct sgl_page_pairs *sgl_pg_pairs; 13538 void *viraddr; 13539 LPFC_MBOXQ_t *mbox; 13540 uint32_t reqlen, alloclen, pg_pairs; 13541 uint32_t mbox_tmo; 13542 uint16_t xritag_start = 0; 13543 int rc = 0; 13544 uint32_t shdr_status, shdr_add_status; 13545 dma_addr_t pdma_phys_bpl1; 13546 union lpfc_sli4_cfg_shdr *shdr; 13547 13548 /* Calculate the requested length of the dma memory */ 13549 reqlen = count * sizeof(struct sgl_page_pairs) + 13550 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); 13551 if (reqlen > SLI4_PAGE_SIZE) { 13552 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 13553 "0217 Block sgl registration required DMA " 13554 "size (%d) great than a page\n", reqlen); 13555 return -ENOMEM; 13556 } 13557 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 13558 if (!mbox) { 13559 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13560 "0283 Failed to allocate mbox cmd memory\n"); 13561 return -ENOMEM; 13562 } 13563 13564 /* Allocate DMA memory and set up the non-embedded mailbox command */ 13565 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 13566 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen, 13567 LPFC_SLI4_MBX_NEMBED); 13568 13569 if (alloclen < reqlen) { 13570 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13571 "2561 Allocated DMA memory size (%d) is " 13572 "less than the requested DMA memory " 13573 "size (%d)\n", alloclen, reqlen); 13574 lpfc_sli4_mbox_cmd_free(phba, mbox); 13575 return -ENOMEM; 13576 } 13577 13578 /* Get the first SGE entry from the non-embedded DMA memory */ 13579 viraddr = mbox->sge_array->addr[0]; 13580 13581 /* Set up the SGL pages in the non-embedded DMA pages */ 13582 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; 13583 sgl_pg_pairs = &sgl->sgl_pg_pairs; 13584 13585 pg_pairs = 0; 13586 list_for_each_entry(psb, sblist, list) { 13587 /* Set up the sge entry */ 13588 sgl_pg_pairs->sgl_pg0_addr_lo = 13589 
cpu_to_le32(putPaddrLow(psb->dma_phys_bpl)); 13590 sgl_pg_pairs->sgl_pg0_addr_hi = 13591 cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl)); 13592 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) 13593 pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE; 13594 else 13595 pdma_phys_bpl1 = 0; 13596 sgl_pg_pairs->sgl_pg1_addr_lo = 13597 cpu_to_le32(putPaddrLow(pdma_phys_bpl1)); 13598 sgl_pg_pairs->sgl_pg1_addr_hi = 13599 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1)); 13600 /* Keep the first xritag on the list */ 13601 if (pg_pairs == 0) 13602 xritag_start = psb->cur_iocbq.sli4_xritag; 13603 sgl_pg_pairs++; 13604 pg_pairs++; 13605 } 13606 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); 13607 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs); 13608 /* Perform endian conversion if necessary */ 13609 sgl->word0 = cpu_to_le32(sgl->word0); 13610 13611 if (!phba->sli4_hba.intr_enable) 13612 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 13613 else { 13614 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 13615 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 13616 } 13617 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; 13618 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 13619 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 13620 if (rc != MBX_TIMEOUT) 13621 lpfc_sli4_mbox_cmd_free(phba, mbox); 13622 if (shdr_status || shdr_add_status || rc) { 13623 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13624 "2564 POST_SGL_BLOCK mailbox command failed " 13625 "status x%x add_status x%x mbx status x%x\n", 13626 shdr_status, shdr_add_status, rc); 13627 rc = -ENXIO; 13628 } 13629 return rc; 13630 } 13631 13632 /** 13633 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle 13634 * @phba: pointer to lpfc_hba struct that the frame was received on 13635 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 13636 * 13637 * This function checks the fields in the @fc_hdr to see if the FC frame is a 13638 * valid type of frame that the LPFC driver will handle. This function will 13639 * return a zero if the frame is a valid frame or a non zero value when the 13640 * frame does not pass the check. 
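 *
 * A minimal caller sketch, hedged and mirroring the receive path in
 * lpfc_sli4_handle_received_buffer() below, assuming dmabuf->hbuf.virt
 * holds the raw FC header of the received frame:
 *
 *	fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
 *	if (lpfc_fc_frame_check(phba, fc_hdr)) {
 *		lpfc_in_buf_free(phba, &dmabuf->dbuf);
 *		return;
 *	}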
13641 **/ 13642 static int 13643 lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) 13644 { 13645 /* make rctl_names static to save stack space */ 13646 static char *rctl_names[] = FC_RCTL_NAMES_INIT; 13647 char *type_names[] = FC_TYPE_NAMES_INIT; 13648 struct fc_vft_header *fc_vft_hdr; 13649 uint32_t *header = (uint32_t *) fc_hdr; 13650 13651 switch (fc_hdr->fh_r_ctl) { 13652 case FC_RCTL_DD_UNCAT: /* uncategorized information */ 13653 case FC_RCTL_DD_SOL_DATA: /* solicited data */ 13654 case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */ 13655 case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */ 13656 case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */ 13657 case FC_RCTL_DD_DATA_DESC: /* data descriptor */ 13658 case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */ 13659 case FC_RCTL_DD_CMD_STATUS: /* command status */ 13660 case FC_RCTL_ELS_REQ: /* extended link services request */ 13661 case FC_RCTL_ELS_REP: /* extended link services reply */ 13662 case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */ 13663 case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */ 13664 case FC_RCTL_BA_NOP: /* basic link service NOP */ 13665 case FC_RCTL_BA_ABTS: /* basic link service abort */ 13666 case FC_RCTL_BA_RMC: /* remove connection */ 13667 case FC_RCTL_BA_ACC: /* basic accept */ 13668 case FC_RCTL_BA_RJT: /* basic reject */ 13669 case FC_RCTL_BA_PRMT: 13670 case FC_RCTL_ACK_1: /* acknowledge_1 */ 13671 case FC_RCTL_ACK_0: /* acknowledge_0 */ 13672 case FC_RCTL_P_RJT: /* port reject */ 13673 case FC_RCTL_F_RJT: /* fabric reject */ 13674 case FC_RCTL_P_BSY: /* port busy */ 13675 case FC_RCTL_F_BSY: /* fabric busy to data frame */ 13676 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */ 13677 case FC_RCTL_LCR: /* link credit reset */ 13678 case FC_RCTL_END: /* end */ 13679 break; 13680 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */ 13681 fc_vft_hdr = (struct fc_vft_header *)fc_hdr; 13682 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1]; 13683 return lpfc_fc_frame_check(phba, fc_hdr); 13684 default: 13685 goto drop; 13686 } 13687 switch (fc_hdr->fh_type) { 13688 case FC_TYPE_BLS: 13689 case FC_TYPE_ELS: 13690 case FC_TYPE_FCP: 13691 case FC_TYPE_CT: 13692 break; 13693 case FC_TYPE_IP: 13694 case FC_TYPE_ILS: 13695 default: 13696 goto drop; 13697 } 13698 13699 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 13700 "2538 Received frame rctl:%s type:%s " 13701 "Frame Data:%08x %08x %08x %08x %08x %08x\n", 13702 rctl_names[fc_hdr->fh_r_ctl], 13703 type_names[fc_hdr->fh_type], 13704 be32_to_cpu(header[0]), be32_to_cpu(header[1]), 13705 be32_to_cpu(header[2]), be32_to_cpu(header[3]), 13706 be32_to_cpu(header[4]), be32_to_cpu(header[5])); 13707 return 0; 13708 drop: 13709 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, 13710 "2539 Dropped frame rctl:%s type:%s\n", 13711 rctl_names[fc_hdr->fh_r_ctl], 13712 type_names[fc_hdr->fh_type]); 13713 return 1; 13714 } 13715 13716 /** 13717 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame 13718 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 13719 * 13720 * This function processes the FC header to retrieve the VFI from the VF 13721 * header, if one exists. This function will return the VFI if one exists 13722 * or 0 if no VSAN Header exists. 
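 *
 * A hedged usage sketch, mirroring the vport matching done in
 * lpfc_fc_frame_to_vport() below (fcfi and did are assumed to have been
 * derived from the receive queue entry and the FC header by the caller):
 *
 *	if (phba->fcf.fcfi == fcfi &&
 *	    vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
 *	    vports[i]->fc_myDID == did)
 *		vport = vports[i];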
13723 **/ 13724 static uint32_t 13725 lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr) 13726 { 13727 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr; 13728 13729 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH) 13730 return 0; 13731 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr); 13732 } 13733 13734 /** 13735 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to 13736 * @phba: Pointer to the HBA structure to search for the vport on 13737 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 13738 * @fcfi: The FC Fabric ID that the frame came from 13739 * 13740 * This function searches the @phba for a vport that matches the content of the 13741 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the 13742 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function 13743 * returns the matching vport pointer or NULL if unable to match frame to a 13744 * vport. 13745 **/ 13746 static struct lpfc_vport * 13747 lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr, 13748 uint16_t fcfi) 13749 { 13750 struct lpfc_vport **vports; 13751 struct lpfc_vport *vport = NULL; 13752 int i; 13753 uint32_t did = (fc_hdr->fh_d_id[0] << 16 | 13754 fc_hdr->fh_d_id[1] << 8 | 13755 fc_hdr->fh_d_id[2]); 13756 13757 if (did == Fabric_DID) 13758 return phba->pport; 13759 if ((phba->pport->fc_flag & FC_PT2PT) && 13760 !(phba->link_state == LPFC_HBA_READY)) 13761 return phba->pport; 13762 13763 vports = lpfc_create_vport_work_array(phba); 13764 if (vports != NULL) 13765 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 13766 if (phba->fcf.fcfi == fcfi && 13767 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) && 13768 vports[i]->fc_myDID == did) { 13769 vport = vports[i]; 13770 break; 13771 } 13772 } 13773 lpfc_destroy_vport_work_array(phba, vports); 13774 return vport; 13775 } 13776 13777 /** 13778 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp 13779 * @vport: The vport to work on. 13780 * 13781 * This function updates the receive sequence time stamp for this vport. The 13782 * receive sequence time stamp indicates the time that the last frame of the 13783 * the sequence that has been idle for the longest amount of time was received. 13784 * the driver uses this time stamp to indicate if any received sequences have 13785 * timed out. 13786 **/ 13787 void 13788 lpfc_update_rcv_time_stamp(struct lpfc_vport *vport) 13789 { 13790 struct lpfc_dmabuf *h_buf; 13791 struct hbq_dmabuf *dmabuf = NULL; 13792 13793 /* get the oldest sequence on the rcv list */ 13794 h_buf = list_get_first(&vport->rcv_buffer_list, 13795 struct lpfc_dmabuf, list); 13796 if (!h_buf) 13797 return; 13798 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 13799 vport->rcv_buffer_time_stamp = dmabuf->time_stamp; 13800 } 13801 13802 /** 13803 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences. 13804 * @vport: The vport that the received sequences were sent to. 13805 * 13806 * This function cleans up all outstanding received sequences. This is called 13807 * by the driver when a link event or user action invalidates all the received 13808 * sequences. 
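 *
 * A hedged usage sketch; the per-vport loop and the vports work array
 * obtained from lpfc_create_vport_work_array() are assumptions about the
 * calling context, not part of this routine:
 *
 *	for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++)
 *		lpfc_cleanup_rcv_buffers(vports[i]);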
13809 **/ 13810 void 13811 lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport) 13812 { 13813 struct lpfc_dmabuf *h_buf, *hnext; 13814 struct lpfc_dmabuf *d_buf, *dnext; 13815 struct hbq_dmabuf *dmabuf = NULL; 13816 13817 /* start with the oldest sequence on the rcv list */ 13818 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) { 13819 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 13820 list_del_init(&dmabuf->hbuf.list); 13821 list_for_each_entry_safe(d_buf, dnext, 13822 &dmabuf->dbuf.list, list) { 13823 list_del_init(&d_buf->list); 13824 lpfc_in_buf_free(vport->phba, d_buf); 13825 } 13826 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf); 13827 } 13828 } 13829 13830 /** 13831 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences. 13832 * @vport: The vport that the received sequences were sent to. 13833 * 13834 * This function determines whether any received sequences have timed out by 13835 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp 13836 * indicates that there is at least one timed out sequence this routine will 13837 * go through the received sequences one at a time from most inactive to most 13838 * active to determine which ones need to be cleaned up. Once it has determined 13839 * that a sequence needs to be cleaned up it will simply free up the resources 13840 * without sending an abort. 13841 **/ 13842 void 13843 lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport) 13844 { 13845 struct lpfc_dmabuf *h_buf, *hnext; 13846 struct lpfc_dmabuf *d_buf, *dnext; 13847 struct hbq_dmabuf *dmabuf = NULL; 13848 unsigned long timeout; 13849 int abort_count = 0; 13850 13851 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) + 13852 vport->rcv_buffer_time_stamp); 13853 if (list_empty(&vport->rcv_buffer_list) || 13854 time_before(jiffies, timeout)) 13855 return; 13856 /* start with the oldest sequence on the rcv list */ 13857 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) { 13858 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 13859 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) + 13860 dmabuf->time_stamp); 13861 if (time_before(jiffies, timeout)) 13862 break; 13863 abort_count++; 13864 list_del_init(&dmabuf->hbuf.list); 13865 list_for_each_entry_safe(d_buf, dnext, 13866 &dmabuf->dbuf.list, list) { 13867 list_del_init(&d_buf->list); 13868 lpfc_in_buf_free(vport->phba, d_buf); 13869 } 13870 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf); 13871 } 13872 if (abort_count) 13873 lpfc_update_rcv_time_stamp(vport); 13874 } 13875 13876 /** 13877 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences 13878 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame 13879 * 13880 * This function searches through the existing incomplete sequences that have 13881 * been sent to this @vport. If the frame matches one of the incomplete 13882 * sequences then the dbuf in the @dmabuf is added to the list of frames that 13883 * make up that sequence. If no sequence is found that matches this frame then 13884 * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list 13885 * This function returns a pointer to the first dmabuf in the sequence list that 13886 * the frame was linked to. 
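 *
 * Frames are matched to a pending sequence by the (S_ID, OX_ID, SEQ_ID)
 * tuple and kept ordered by SEQ_CNT; a sketch of the match test used below,
 * where temp_hdr is a queued header and new_hdr is the new frame's header:
 *
 *	if (temp_hdr->fh_seq_id == new_hdr->fh_seq_id &&
 *	    temp_hdr->fh_ox_id == new_hdr->fh_ox_id &&
 *	    !memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3))
 *		seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);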
13887 **/ 13888 static struct hbq_dmabuf * 13889 lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf) 13890 { 13891 struct fc_frame_header *new_hdr; 13892 struct fc_frame_header *temp_hdr; 13893 struct lpfc_dmabuf *d_buf; 13894 struct lpfc_dmabuf *h_buf; 13895 struct hbq_dmabuf *seq_dmabuf = NULL; 13896 struct hbq_dmabuf *temp_dmabuf = NULL; 13897 13898 INIT_LIST_HEAD(&dmabuf->dbuf.list); 13899 dmabuf->time_stamp = jiffies; 13900 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 13901 /* Use the hdr_buf to find the sequence that this frame belongs to */ 13902 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) { 13903 temp_hdr = (struct fc_frame_header *)h_buf->virt; 13904 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) || 13905 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) || 13906 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3))) 13907 continue; 13908 /* found a pending sequence that matches this frame */ 13909 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 13910 break; 13911 } 13912 if (!seq_dmabuf) { 13913 /* 13914 * This indicates first frame received for this sequence. 13915 * Queue the buffer on the vport's rcv_buffer_list. 13916 */ 13917 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list); 13918 lpfc_update_rcv_time_stamp(vport); 13919 return dmabuf; 13920 } 13921 temp_hdr = seq_dmabuf->hbuf.virt; 13922 if (be16_to_cpu(new_hdr->fh_seq_cnt) < 13923 be16_to_cpu(temp_hdr->fh_seq_cnt)) { 13924 list_del_init(&seq_dmabuf->hbuf.list); 13925 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list); 13926 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list); 13927 lpfc_update_rcv_time_stamp(vport); 13928 return dmabuf; 13929 } 13930 /* move this sequence to the tail to indicate a young sequence */ 13931 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list); 13932 seq_dmabuf->time_stamp = jiffies; 13933 lpfc_update_rcv_time_stamp(vport); 13934 if (list_empty(&seq_dmabuf->dbuf.list)) { 13935 temp_hdr = dmabuf->hbuf.virt; 13936 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list); 13937 return seq_dmabuf; 13938 } 13939 /* find the correct place in the sequence to insert this frame */ 13940 list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) { 13941 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); 13942 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt; 13943 /* 13944 * If the frame's sequence count is greater than the frame on 13945 * the list then insert the frame right after this frame 13946 */ 13947 if (be16_to_cpu(new_hdr->fh_seq_cnt) > 13948 be16_to_cpu(temp_hdr->fh_seq_cnt)) { 13949 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list); 13950 return seq_dmabuf; 13951 } 13952 } 13953 return NULL; 13954 } 13955 13956 /** 13957 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence 13958 * @vport: pointer to a vitural port 13959 * @dmabuf: pointer to a dmabuf that describes the FC sequence 13960 * 13961 * This function tries to abort from the partially assembed sequence, described 13962 * by the information from basic abbort @dmabuf. It checks to see whether such 13963 * partially assembled sequence held by the driver. If so, it shall free up all 13964 * the frames from the partially assembled sequence. 
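 *
 * A hedged caller sketch, mirroring lpfc_sli4_handle_unsol_abort() below:
 *
 *	abts_par = lpfc_sli4_abort_partial_seq(vport, dmabuf);
 *	if (abts_par == false)
 *		lpfc_sli4_send_seq_to_ulp(vport, dmabuf);
 *	else
 *		lpfc_in_buf_free(phba, &dmabuf->dbuf);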
13965 * 13966 * Return 13967 * true -- if there is matching partially assembled sequence present and all 13968 * the frames freed with the sequence; 13969 * false -- if there is no matching partially assembled sequence present so 13970 * nothing got aborted in the lower layer driver 13971 **/ 13972 static bool 13973 lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport, 13974 struct hbq_dmabuf *dmabuf) 13975 { 13976 struct fc_frame_header *new_hdr; 13977 struct fc_frame_header *temp_hdr; 13978 struct lpfc_dmabuf *d_buf, *n_buf, *h_buf; 13979 struct hbq_dmabuf *seq_dmabuf = NULL; 13980 13981 /* Use the hdr_buf to find the sequence that matches this frame */ 13982 INIT_LIST_HEAD(&dmabuf->dbuf.list); 13983 INIT_LIST_HEAD(&dmabuf->hbuf.list); 13984 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 13985 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) { 13986 temp_hdr = (struct fc_frame_header *)h_buf->virt; 13987 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) || 13988 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) || 13989 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3))) 13990 continue; 13991 /* found a pending sequence that matches this frame */ 13992 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 13993 break; 13994 } 13995 13996 /* Free up all the frames from the partially assembled sequence */ 13997 if (seq_dmabuf) { 13998 list_for_each_entry_safe(d_buf, n_buf, 13999 &seq_dmabuf->dbuf.list, list) { 14000 list_del_init(&d_buf->list); 14001 lpfc_in_buf_free(vport->phba, d_buf); 14002 } 14003 return true; 14004 } 14005 return false; 14006 } 14007 14008 /** 14009 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler 14010 * @phba: Pointer to HBA context object. 14011 * @cmd_iocbq: pointer to the command iocbq structure. 14012 * @rsp_iocbq: pointer to the response iocbq structure. 14013 * 14014 * This function handles the sequence abort response iocb command complete 14015 * event. It properly releases the memory allocated to the sequence abort 14016 * accept iocb. 14017 **/ 14018 static void 14019 lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba, 14020 struct lpfc_iocbq *cmd_iocbq, 14021 struct lpfc_iocbq *rsp_iocbq) 14022 { 14023 if (cmd_iocbq) 14024 lpfc_sli_release_iocbq(phba, cmd_iocbq); 14025 14026 /* Failure means BLS ABORT RSP did not get delivered to remote node*/ 14027 if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus) 14028 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14029 "3154 BLS ABORT RSP failed, data: x%x/x%x\n", 14030 rsp_iocbq->iocb.ulpStatus, 14031 rsp_iocbq->iocb.un.ulpWord[4]); 14032 } 14033 14034 /** 14035 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver. 14036 * @phba: Pointer to HBA context object. 14037 * @xri: xri id in transaction. 14038 * 14039 * This function validates the xri maps to the known range of XRIs allocated an 14040 * used by the driver. 14041 **/ 14042 uint16_t 14043 lpfc_sli4_xri_inrange(struct lpfc_hba *phba, 14044 uint16_t xri) 14045 { 14046 int i; 14047 14048 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) { 14049 if (xri == phba->sli4_hba.xri_ids[i]) 14050 return i; 14051 } 14052 return NO_XRI; 14053 } 14054 14055 /** 14056 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort 14057 * @phba: Pointer to HBA context object. 14058 * @fc_hdr: pointer to a FC frame header. 14059 * 14060 * This function sends a basic response to a previous unsol sequence abort 14061 * event after aborting the sequence handling. 
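 *
 * A hedged caller sketch; the FC header is copied out of the receive buffer
 * first, since that buffer may be freed before the response is built:
 *
 *	memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
 *	...
 *	lpfc_sli4_seq_abort_rsp(phba, &fc_hdr);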
14062 **/ 14063 static void 14064 lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba, 14065 struct fc_frame_header *fc_hdr) 14066 { 14067 struct lpfc_iocbq *ctiocb = NULL; 14068 struct lpfc_nodelist *ndlp; 14069 uint16_t oxid, rxid, xri, lxri; 14070 uint32_t sid, fctl; 14071 IOCB_t *icmd; 14072 int rc; 14073 14074 if (!lpfc_is_link_up(phba)) 14075 return; 14076 14077 sid = sli4_sid_from_fc_hdr(fc_hdr); 14078 oxid = be16_to_cpu(fc_hdr->fh_ox_id); 14079 rxid = be16_to_cpu(fc_hdr->fh_rx_id); 14080 14081 ndlp = lpfc_findnode_did(phba->pport, sid); 14082 if (!ndlp) { 14083 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, 14084 "1268 Find ndlp returned NULL for oxid:x%x " 14085 "SID:x%x\n", oxid, sid); 14086 return; 14087 } 14088 14089 /* Allocate buffer for rsp iocb */ 14090 ctiocb = lpfc_sli_get_iocbq(phba); 14091 if (!ctiocb) 14092 return; 14093 14094 /* Extract the F_CTL field from FC_HDR */ 14095 fctl = sli4_fctl_from_fc_hdr(fc_hdr); 14096 14097 icmd = &ctiocb->iocb; 14098 icmd->un.xseq64.bdl.bdeSize = 0; 14099 icmd->un.xseq64.bdl.ulpIoTag32 = 0; 14100 icmd->un.xseq64.w5.hcsw.Dfctl = 0; 14101 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC; 14102 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS; 14103 14104 /* Fill in the rest of iocb fields */ 14105 icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX; 14106 icmd->ulpBdeCount = 0; 14107 icmd->ulpLe = 1; 14108 icmd->ulpClass = CLASS3; 14109 icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; 14110 ctiocb->context1 = ndlp; 14111 14112 ctiocb->iocb_cmpl = NULL; 14113 ctiocb->vport = phba->pport; 14114 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl; 14115 ctiocb->sli4_lxritag = NO_XRI; 14116 ctiocb->sli4_xritag = NO_XRI; 14117 14118 if (fctl & FC_FC_EX_CTX) 14119 /* Exchange responder sent the abort so we 14120 * own the oxid. 14121 */ 14122 xri = oxid; 14123 else 14124 xri = rxid; 14125 lxri = lpfc_sli4_xri_inrange(phba, xri); 14126 if (lxri != NO_XRI) 14127 lpfc_set_rrq_active(phba, ndlp, lxri, 14128 (xri == oxid) ? rxid : oxid, 0); 14129 /* If the oxid maps to the FCP XRI range or if it is out of range, 14130 * send a BLS_RJT. The driver no longer has that exchange. 14131 * Override the IOCB for a BA_RJT. 14132 */ 14133 if (xri > (phba->sli4_hba.max_cfg_param.max_xri + 14134 phba->sli4_hba.max_cfg_param.xri_base) || 14135 xri > (lpfc_sli4_get_els_iocb_cnt(phba) + 14136 phba->sli4_hba.max_cfg_param.xri_base)) { 14137 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT; 14138 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0); 14139 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID); 14140 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE); 14141 } 14142 14143 if (fctl & FC_FC_EX_CTX) { 14144 /* ABTS sent by responder to CT exchange, construction 14145 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG 14146 * field and RX_ID from ABTS for RX_ID field. 14147 */ 14148 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP); 14149 } else { 14150 /* ABTS sent by initiator to CT exchange, construction 14151 * of BA_ACC will need to allocate a new XRI as for the 14152 * XRI_TAG field. 
14153 */ 14154 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT); 14155 } 14156 bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid); 14157 bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid); 14158 14159 /* Xmit CT abts response on exchange <xid> */ 14160 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 14161 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n", 14162 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state); 14163 14164 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0); 14165 if (rc == IOCB_ERROR) { 14166 lpfc_printf_log(phba, KERN_ERR, LOG_ELS, 14167 "2925 Failed to issue CT ABTS RSP x%x on " 14168 "xri x%x, Data x%x\n", 14169 icmd->un.xseq64.w5.hcsw.Rctl, oxid, 14170 phba->link_state); 14171 lpfc_sli_release_iocbq(phba, ctiocb); 14172 } 14173 } 14174 14175 /** 14176 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event 14177 * @vport: Pointer to the vport on which this sequence was received 14178 * @dmabuf: pointer to a dmabuf that describes the FC sequence 14179 * 14180 * This function handles an SLI-4 unsolicited abort event. If the unsolicited 14181 * receive sequence is only partially assembed by the driver, it shall abort 14182 * the partially assembled frames for the sequence. Otherwise, if the 14183 * unsolicited receive sequence has been completely assembled and passed to 14184 * the Upper Layer Protocol (UPL), it then mark the per oxid status for the 14185 * unsolicited sequence has been aborted. After that, it will issue a basic 14186 * accept to accept the abort. 14187 **/ 14188 void 14189 lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport, 14190 struct hbq_dmabuf *dmabuf) 14191 { 14192 struct lpfc_hba *phba = vport->phba; 14193 struct fc_frame_header fc_hdr; 14194 uint32_t fctl; 14195 bool abts_par; 14196 14197 /* Make a copy of fc_hdr before the dmabuf being released */ 14198 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header)); 14199 fctl = sli4_fctl_from_fc_hdr(&fc_hdr); 14200 14201 if (fctl & FC_FC_EX_CTX) { 14202 /* 14203 * ABTS sent by responder to exchange, just free the buffer 14204 */ 14205 lpfc_in_buf_free(phba, &dmabuf->dbuf); 14206 } else { 14207 /* 14208 * ABTS sent by initiator to exchange, need to do cleanup 14209 */ 14210 /* Try to abort partially assembled seq */ 14211 abts_par = lpfc_sli4_abort_partial_seq(vport, dmabuf); 14212 14213 /* Send abort to ULP if partially seq abort failed */ 14214 if (abts_par == false) 14215 lpfc_sli4_send_seq_to_ulp(vport, dmabuf); 14216 else 14217 lpfc_in_buf_free(phba, &dmabuf->dbuf); 14218 } 14219 /* Send basic accept (BA_ACC) to the abort requester */ 14220 lpfc_sli4_seq_abort_rsp(phba, &fc_hdr); 14221 } 14222 14223 /** 14224 * lpfc_seq_complete - Indicates if a sequence is complete 14225 * @dmabuf: pointer to a dmabuf that describes the FC sequence 14226 * 14227 * This function checks the sequence, starting with the frame described by 14228 * @dmabuf, to see if all the frames associated with this sequence are present. 14229 * the frames associated with this sequence are linked to the @dmabuf using the 14230 * dbuf list. This function looks for two major things. 1) That the first frame 14231 * has a sequence count of zero. 2) There is a frame with last frame of sequence 14232 * set. 3) That there are no holes in the sequence count. The function will 14233 * return 1 when the sequence is complete, otherwise it will return 0. 
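 *
 * The end-of-sequence test below rebuilds the 24-bit F_CTL field from its
 * three header bytes and checks the FC_FC_END_SEQ bit, roughly:
 *
 *	fctl = (hdr->fh_f_ctl[0] << 16 |
 *		hdr->fh_f_ctl[1] << 8 |
 *		hdr->fh_f_ctl[2]);
 *	if (fctl & FC_FC_END_SEQ)
 *		return 1;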
14234 **/ 14235 static int 14236 lpfc_seq_complete(struct hbq_dmabuf *dmabuf) 14237 { 14238 struct fc_frame_header *hdr; 14239 struct lpfc_dmabuf *d_buf; 14240 struct hbq_dmabuf *seq_dmabuf; 14241 uint32_t fctl; 14242 int seq_count = 0; 14243 14244 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 14245 /* make sure first fame of sequence has a sequence count of zero */ 14246 if (hdr->fh_seq_cnt != seq_count) 14247 return 0; 14248 fctl = (hdr->fh_f_ctl[0] << 16 | 14249 hdr->fh_f_ctl[1] << 8 | 14250 hdr->fh_f_ctl[2]); 14251 /* If last frame of sequence we can return success. */ 14252 if (fctl & FC_FC_END_SEQ) 14253 return 1; 14254 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) { 14255 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); 14256 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 14257 /* If there is a hole in the sequence count then fail. */ 14258 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt)) 14259 return 0; 14260 fctl = (hdr->fh_f_ctl[0] << 16 | 14261 hdr->fh_f_ctl[1] << 8 | 14262 hdr->fh_f_ctl[2]); 14263 /* If last frame of sequence we can return success. */ 14264 if (fctl & FC_FC_END_SEQ) 14265 return 1; 14266 } 14267 return 0; 14268 } 14269 14270 /** 14271 * lpfc_prep_seq - Prep sequence for ULP processing 14272 * @vport: Pointer to the vport on which this sequence was received 14273 * @dmabuf: pointer to a dmabuf that describes the FC sequence 14274 * 14275 * This function takes a sequence, described by a list of frames, and creates 14276 * a list of iocbq structures to describe the sequence. This iocbq list will be 14277 * used to issue to the generic unsolicited sequence handler. This routine 14278 * returns a pointer to the first iocbq in the list. If the function is unable 14279 * to allocate an iocbq then it throw out the received frames that were not 14280 * able to be described and return a pointer to the first iocbq. If unable to 14281 * allocate any iocbqs (including the first) this function will return NULL. 14282 **/ 14283 static struct lpfc_iocbq * 14284 lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) 14285 { 14286 struct hbq_dmabuf *hbq_buf; 14287 struct lpfc_dmabuf *d_buf, *n_buf; 14288 struct lpfc_iocbq *first_iocbq, *iocbq; 14289 struct fc_frame_header *fc_hdr; 14290 uint32_t sid; 14291 uint32_t len, tot_len; 14292 struct ulp_bde64 *pbde; 14293 14294 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 14295 /* remove from receive buffer list */ 14296 list_del_init(&seq_dmabuf->hbuf.list); 14297 lpfc_update_rcv_time_stamp(vport); 14298 /* get the Remote Port's SID */ 14299 sid = sli4_sid_from_fc_hdr(fc_hdr); 14300 tot_len = 0; 14301 /* Get an iocbq struct to fill in. */ 14302 first_iocbq = lpfc_sli_get_iocbq(vport->phba); 14303 if (first_iocbq) { 14304 /* Initialize the first IOCB. */ 14305 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0; 14306 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS; 14307 14308 /* Check FC Header to see what TYPE of frame we are rcv'ing */ 14309 if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) { 14310 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX; 14311 first_iocbq->iocb.un.rcvels.parmRo = 14312 sli4_did_from_fc_hdr(fc_hdr); 14313 first_iocbq->iocb.ulpPU = PARM_NPIV_DID; 14314 } else 14315 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX; 14316 first_iocbq->iocb.ulpContext = NO_XRI; 14317 first_iocbq->iocb.unsli3.rcvsli3.ox_id = 14318 be16_to_cpu(fc_hdr->fh_ox_id); 14319 /* iocbq is prepped for internal consumption. Physical vpi. 
*/ 14320 first_iocbq->iocb.unsli3.rcvsli3.vpi = 14321 vport->phba->vpi_ids[vport->vpi]; 14322 /* put the first buffer into the first IOCBq */ 14323 first_iocbq->context2 = &seq_dmabuf->dbuf; 14324 first_iocbq->context3 = NULL; 14325 first_iocbq->iocb.ulpBdeCount = 1; 14326 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = 14327 LPFC_DATA_BUF_SIZE; 14328 first_iocbq->iocb.un.rcvels.remoteID = sid; 14329 tot_len = bf_get(lpfc_rcqe_length, 14330 &seq_dmabuf->cq_event.cqe.rcqe_cmpl); 14331 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len; 14332 } 14333 iocbq = first_iocbq; 14334 /* 14335 * Each IOCBq can have two Buffers assigned, so go through the list 14336 * of buffers for this sequence and save two buffers in each IOCBq 14337 */ 14338 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) { 14339 if (!iocbq) { 14340 lpfc_in_buf_free(vport->phba, d_buf); 14341 continue; 14342 } 14343 if (!iocbq->context3) { 14344 iocbq->context3 = d_buf; 14345 iocbq->iocb.ulpBdeCount++; 14346 pbde = (struct ulp_bde64 *) 14347 &iocbq->iocb.unsli3.sli3Words[4]; 14348 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE; 14349 14350 /* We need to get the size out of the right CQE */ 14351 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 14352 len = bf_get(lpfc_rcqe_length, 14353 &hbq_buf->cq_event.cqe.rcqe_cmpl); 14354 iocbq->iocb.unsli3.rcvsli3.acc_len += len; 14355 tot_len += len; 14356 } else { 14357 iocbq = lpfc_sli_get_iocbq(vport->phba); 14358 if (!iocbq) { 14359 if (first_iocbq) { 14360 first_iocbq->iocb.ulpStatus = 14361 IOSTAT_FCP_RSP_ERROR; 14362 first_iocbq->iocb.un.ulpWord[4] = 14363 IOERR_NO_RESOURCES; 14364 } 14365 lpfc_in_buf_free(vport->phba, d_buf); 14366 continue; 14367 } 14368 iocbq->context2 = d_buf; 14369 iocbq->context3 = NULL; 14370 iocbq->iocb.ulpBdeCount = 1; 14371 iocbq->iocb.un.cont64[0].tus.f.bdeSize = 14372 LPFC_DATA_BUF_SIZE; 14373 14374 /* We need to get the size out of the right CQE */ 14375 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 14376 len = bf_get(lpfc_rcqe_length, 14377 &hbq_buf->cq_event.cqe.rcqe_cmpl); 14378 tot_len += len; 14379 iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len; 14380 14381 iocbq->iocb.un.rcvels.remoteID = sid; 14382 list_add_tail(&iocbq->list, &first_iocbq->list); 14383 } 14384 } 14385 return first_iocbq; 14386 } 14387 14388 static void 14389 lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport, 14390 struct hbq_dmabuf *seq_dmabuf) 14391 { 14392 struct fc_frame_header *fc_hdr; 14393 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb; 14394 struct lpfc_hba *phba = vport->phba; 14395 14396 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 14397 iocbq = lpfc_prep_seq(vport, seq_dmabuf); 14398 if (!iocbq) { 14399 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14400 "2707 Ring %d handler: Failed to allocate " 14401 "iocb Rctl x%x Type x%x received\n", 14402 LPFC_ELS_RING, 14403 fc_hdr->fh_r_ctl, fc_hdr->fh_type); 14404 return; 14405 } 14406 if (!lpfc_complete_unsol_iocb(phba, 14407 &phba->sli.ring[LPFC_ELS_RING], 14408 iocbq, fc_hdr->fh_r_ctl, 14409 fc_hdr->fh_type)) 14410 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14411 "2540 Ring %d handler: unexpected Rctl " 14412 "x%x Type x%x received\n", 14413 LPFC_ELS_RING, 14414 fc_hdr->fh_r_ctl, fc_hdr->fh_type); 14415 14416 /* Free iocb created in lpfc_prep_seq */ 14417 list_for_each_entry_safe(curr_iocb, next_iocb, 14418 &iocbq->list, list) { 14419 list_del_init(&curr_iocb->list); 14420 lpfc_sli_release_iocbq(phba, curr_iocb); 14421 } 14422 lpfc_sli_release_iocbq(phba, iocbq); 14423 } 14424 14425 /** 
14426 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware 14427 * @phba: Pointer to HBA context object. 14428 * 14429 * This function is called with no lock held. This function processes all 14430 * the received buffers and gives it to upper layers when a received buffer 14431 * indicates that it is the final frame in the sequence. The interrupt 14432 * service routine processes received buffers at interrupt contexts and adds 14433 * received dma buffers to the rb_pend_list queue and signals the worker thread. 14434 * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the 14435 * appropriate receive function when the final frame in a sequence is received. 14436 **/ 14437 void 14438 lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba, 14439 struct hbq_dmabuf *dmabuf) 14440 { 14441 struct hbq_dmabuf *seq_dmabuf; 14442 struct fc_frame_header *fc_hdr; 14443 struct lpfc_vport *vport; 14444 uint32_t fcfi; 14445 uint32_t did; 14446 14447 /* Process each received buffer */ 14448 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 14449 /* check to see if this a valid type of frame */ 14450 if (lpfc_fc_frame_check(phba, fc_hdr)) { 14451 lpfc_in_buf_free(phba, &dmabuf->dbuf); 14452 return; 14453 } 14454 if ((bf_get(lpfc_cqe_code, 14455 &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1)) 14456 fcfi = bf_get(lpfc_rcqe_fcf_id_v1, 14457 &dmabuf->cq_event.cqe.rcqe_cmpl); 14458 else 14459 fcfi = bf_get(lpfc_rcqe_fcf_id, 14460 &dmabuf->cq_event.cqe.rcqe_cmpl); 14461 14462 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi); 14463 if (!vport) { 14464 /* throw out the frame */ 14465 lpfc_in_buf_free(phba, &dmabuf->dbuf); 14466 return; 14467 } 14468 14469 /* d_id this frame is directed to */ 14470 did = sli4_did_from_fc_hdr(fc_hdr); 14471 14472 /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */ 14473 if (!(vport->vpi_state & LPFC_VPI_REGISTERED) && 14474 (did != Fabric_DID)) { 14475 /* 14476 * Throw out the frame if we are not pt2pt. 14477 * The pt2pt protocol allows for discovery frames 14478 * to be received without a registered VPI. 14479 */ 14480 if (!(vport->fc_flag & FC_PT2PT) || 14481 (phba->link_state == LPFC_HBA_READY)) { 14482 lpfc_in_buf_free(phba, &dmabuf->dbuf); 14483 return; 14484 } 14485 } 14486 14487 /* Handle the basic abort sequence (BA_ABTS) event */ 14488 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) { 14489 lpfc_sli4_handle_unsol_abort(vport, dmabuf); 14490 return; 14491 } 14492 14493 /* Link this frame */ 14494 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf); 14495 if (!seq_dmabuf) { 14496 /* unable to add frame to vport - throw it out */ 14497 lpfc_in_buf_free(phba, &dmabuf->dbuf); 14498 return; 14499 } 14500 /* If not last frame in sequence continue processing frames. */ 14501 if (!lpfc_seq_complete(seq_dmabuf)) 14502 return; 14503 14504 /* Send the complete sequence to the upper layer protocol */ 14505 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf); 14506 } 14507 14508 /** 14509 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port 14510 * @phba: pointer to lpfc hba data structure. 14511 * 14512 * This routine is invoked to post rpi header templates to the 14513 * HBA consistent with the SLI-4 interface spec. This routine 14514 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to 14515 * SLI4_PAGE_SIZE modulo 64 rpi context headers. 14516 * 14517 * This routine does not require any locks. 
It's usage is expected 14518 * to be driver load or reset recovery when the driver is 14519 * sequential. 14520 * 14521 * Return codes 14522 * 0 - successful 14523 * -EIO - The mailbox failed to complete successfully. 14524 * When this error occurs, the driver is not guaranteed 14525 * to have any rpi regions posted to the device and 14526 * must either attempt to repost the regions or take a 14527 * fatal error. 14528 **/ 14529 int 14530 lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba) 14531 { 14532 struct lpfc_rpi_hdr *rpi_page; 14533 uint32_t rc = 0; 14534 uint16_t lrpi = 0; 14535 14536 /* SLI4 ports that support extents do not require RPI headers. */ 14537 if (!phba->sli4_hba.rpi_hdrs_in_use) 14538 goto exit; 14539 if (phba->sli4_hba.extents_in_use) 14540 return -EIO; 14541 14542 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 14543 /* 14544 * Assign the rpi headers a physical rpi only if the driver 14545 * has not initialized those resources. A port reset only 14546 * needs the headers posted. 14547 */ 14548 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) != 14549 LPFC_RPI_RSRC_RDY) 14550 rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi]; 14551 14552 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page); 14553 if (rc != MBX_SUCCESS) { 14554 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14555 "2008 Error %d posting all rpi " 14556 "headers\n", rc); 14557 rc = -EIO; 14558 break; 14559 } 14560 } 14561 14562 exit: 14563 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 14564 LPFC_RPI_RSRC_RDY); 14565 return rc; 14566 } 14567 14568 /** 14569 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port 14570 * @phba: pointer to lpfc hba data structure. 14571 * @rpi_page: pointer to the rpi memory region. 14572 * 14573 * This routine is invoked to post a single rpi header to the 14574 * HBA consistent with the SLI-4 interface spec. This memory region 14575 * maps up to 64 rpi context regions. 14576 * 14577 * Return codes 14578 * 0 - successful 14579 * -ENOMEM - No available memory 14580 * -EIO - The mailbox failed to complete successfully. 14581 **/ 14582 int 14583 lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page) 14584 { 14585 LPFC_MBOXQ_t *mboxq; 14586 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl; 14587 uint32_t rc = 0; 14588 uint32_t shdr_status, shdr_add_status; 14589 union lpfc_sli4_cfg_shdr *shdr; 14590 14591 /* SLI4 ports that support extents do not require RPI headers. */ 14592 if (!phba->sli4_hba.rpi_hdrs_in_use) 14593 return rc; 14594 if (phba->sli4_hba.extents_in_use) 14595 return -EIO; 14596 14597 /* The port is notified of the header region via a mailbox command. */ 14598 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14599 if (!mboxq) { 14600 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14601 "2001 Unable to allocate memory for issuing " 14602 "SLI_CONFIG_SPECIAL mailbox command\n"); 14603 return -ENOMEM; 14604 } 14605 14606 /* Post all rpi memory regions to the port. */ 14607 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl; 14608 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, 14609 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE, 14610 sizeof(struct lpfc_mbx_post_hdr_tmpl) - 14611 sizeof(struct lpfc_sli4_cfg_mhdr), 14612 LPFC_SLI4_MBX_EMBED); 14613 14614 14615 /* Post the physical rpi to the port for this rpi header. 
*/ 14616 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl, 14617 rpi_page->start_rpi); 14618 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt, 14619 hdr_tmpl, rpi_page->page_count); 14620 14621 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys); 14622 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys); 14623 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 14624 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr; 14625 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 14626 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 14627 if (rc != MBX_TIMEOUT) 14628 mempool_free(mboxq, phba->mbox_mem_pool); 14629 if (shdr_status || shdr_add_status || rc) { 14630 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14631 "2514 POST_RPI_HDR mailbox failed with " 14632 "status x%x add_status x%x, mbx status x%x\n", 14633 shdr_status, shdr_add_status, rc); 14634 rc = -ENXIO; 14635 } 14636 return rc; 14637 } 14638 14639 /** 14640 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range 14641 * @phba: pointer to lpfc hba data structure. 14642 * 14643 * This routine is invoked to post rpi header templates to the 14644 * HBA consistent with the SLI-4 interface spec. This routine 14645 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to 14646 * SLI4_PAGE_SIZE modulo 64 rpi context headers. 14647 * 14648 * Returns 14649 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful 14650 * LPFC_RPI_ALLOC_ERROR if no rpis are available. 14651 **/ 14652 int 14653 lpfc_sli4_alloc_rpi(struct lpfc_hba *phba) 14654 { 14655 unsigned long rpi; 14656 uint16_t max_rpi, rpi_limit; 14657 uint16_t rpi_remaining, lrpi = 0; 14658 struct lpfc_rpi_hdr *rpi_hdr; 14659 14660 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi; 14661 rpi_limit = phba->sli4_hba.next_rpi; 14662 14663 /* 14664 * Fetch the next logical rpi. Because this index is logical, 14665 * the driver starts at 0 each time. 14666 */ 14667 spin_lock_irq(&phba->hbalock); 14668 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0); 14669 if (rpi >= rpi_limit) 14670 rpi = LPFC_RPI_ALLOC_ERROR; 14671 else { 14672 set_bit(rpi, phba->sli4_hba.rpi_bmask); 14673 phba->sli4_hba.max_cfg_param.rpi_used++; 14674 phba->sli4_hba.rpi_count++; 14675 } 14676 14677 /* 14678 * Don't try to allocate more rpi header regions if the device limit 14679 * has been exhausted. 14680 */ 14681 if ((rpi == LPFC_RPI_ALLOC_ERROR) && 14682 (phba->sli4_hba.rpi_count >= max_rpi)) { 14683 spin_unlock_irq(&phba->hbalock); 14684 return rpi; 14685 } 14686 14687 /* 14688 * RPI header postings are not required for SLI4 ports capable of 14689 * extents. 14690 */ 14691 if (!phba->sli4_hba.rpi_hdrs_in_use) { 14692 spin_unlock_irq(&phba->hbalock); 14693 return rpi; 14694 } 14695 14696 /* 14697 * If the driver is running low on rpi resources, allocate another 14698 * page now. Note that the next_rpi value is used because 14699 * it represents how many are actually in use whereas max_rpi notes 14700 * how many are supported max by the device. 
14701 */ 14702 rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count; 14703 spin_unlock_irq(&phba->hbalock); 14704 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) { 14705 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); 14706 if (!rpi_hdr) { 14707 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14708 "2002 Error Could not grow rpi " 14709 "count\n"); 14710 } else { 14711 lrpi = rpi_hdr->start_rpi; 14712 rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi]; 14713 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr); 14714 } 14715 } 14716 14717 return rpi; 14718 } 14719 14720 /** 14721 * lpfc_sli4_free_rpi - Release an rpi for reuse. 14722 * @phba: pointer to lpfc hba data structure. 14723 * 14724 * This routine is invoked to release an rpi to the pool of 14725 * available rpis maintained by the driver. 14726 **/ 14727 void 14728 __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi) 14729 { 14730 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) { 14731 phba->sli4_hba.rpi_count--; 14732 phba->sli4_hba.max_cfg_param.rpi_used--; 14733 } 14734 } 14735 14736 /** 14737 * lpfc_sli4_free_rpi - Release an rpi for reuse. 14738 * @phba: pointer to lpfc hba data structure. 14739 * 14740 * This routine is invoked to release an rpi to the pool of 14741 * available rpis maintained by the driver. 14742 **/ 14743 void 14744 lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi) 14745 { 14746 spin_lock_irq(&phba->hbalock); 14747 __lpfc_sli4_free_rpi(phba, rpi); 14748 spin_unlock_irq(&phba->hbalock); 14749 } 14750 14751 /** 14752 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region 14753 * @phba: pointer to lpfc hba data structure. 14754 * 14755 * This routine is invoked to remove the memory region that 14756 * provided rpi via a bitmask. 14757 **/ 14758 void 14759 lpfc_sli4_remove_rpis(struct lpfc_hba *phba) 14760 { 14761 kfree(phba->sli4_hba.rpi_bmask); 14762 kfree(phba->sli4_hba.rpi_ids); 14763 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 14764 } 14765 14766 /** 14767 * lpfc_sli4_resume_rpi - Remove the rpi bitmask region 14768 * @phba: pointer to lpfc hba data structure. 14769 * 14770 * This routine is invoked to remove the memory region that 14771 * provided rpi via a bitmask. 14772 **/ 14773 int 14774 lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp, 14775 void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg) 14776 { 14777 LPFC_MBOXQ_t *mboxq; 14778 struct lpfc_hba *phba = ndlp->phba; 14779 int rc; 14780 14781 /* The port is notified of the header region via a mailbox command. */ 14782 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14783 if (!mboxq) 14784 return -ENOMEM; 14785 14786 /* Post all rpi memory regions to the port. */ 14787 lpfc_resume_rpi(mboxq, ndlp); 14788 if (cmpl) { 14789 mboxq->mbox_cmpl = cmpl; 14790 mboxq->context1 = arg; 14791 mboxq->context2 = ndlp; 14792 } else 14793 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 14794 mboxq->vport = ndlp->vport; 14795 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 14796 if (rc == MBX_NOT_FINISHED) { 14797 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14798 "2010 Resume RPI Mailbox failed " 14799 "status %d, mbxStatus x%x\n", rc, 14800 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 14801 mempool_free(mboxq, phba->mbox_mem_pool); 14802 return -EIO; 14803 } 14804 return 0; 14805 } 14806 14807 /** 14808 * lpfc_sli4_init_vpi - Initialize a vpi with the port 14809 * @vport: Pointer to the vport for which the vpi is being initialized 14810 * 14811 * This routine is invoked to activate a vpi with the port. 
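 *
 * A hedged usage sketch for a vport bring-up path (the surrounding error
 * handling is an assumption, not part of this routine), assuming the vport
 * has already been assigned a vpi:
 *
 *	rc = lpfc_sli4_init_vpi(vport);
 *	if (rc)
 *		return rc;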
14812 * 14813 * Returns: 14814 * 0 success 14815 * -Evalue otherwise 14816 **/ 14817 int 14818 lpfc_sli4_init_vpi(struct lpfc_vport *vport) 14819 { 14820 LPFC_MBOXQ_t *mboxq; 14821 int rc = 0; 14822 int retval = MBX_SUCCESS; 14823 uint32_t mbox_tmo; 14824 struct lpfc_hba *phba = vport->phba; 14825 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14826 if (!mboxq) 14827 return -ENOMEM; 14828 lpfc_init_vpi(phba, mboxq, vport->vpi); 14829 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); 14830 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 14831 if (rc != MBX_SUCCESS) { 14832 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI, 14833 "2022 INIT VPI Mailbox failed " 14834 "status %d, mbxStatus x%x\n", rc, 14835 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 14836 retval = -EIO; 14837 } 14838 if (rc != MBX_TIMEOUT) 14839 mempool_free(mboxq, vport->phba->mbox_mem_pool); 14840 14841 return retval; 14842 } 14843 14844 /** 14845 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler. 14846 * @phba: pointer to lpfc hba data structure. 14847 * @mboxq: Pointer to mailbox object. 14848 * 14849 * This routine is invoked to manually add a single FCF record. The caller 14850 * must pass a completely initialized FCF_Record. This routine takes 14851 * care of the nonembedded mailbox operations. 14852 **/ 14853 static void 14854 lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 14855 { 14856 void *virt_addr; 14857 union lpfc_sli4_cfg_shdr *shdr; 14858 uint32_t shdr_status, shdr_add_status; 14859 14860 virt_addr = mboxq->sge_array->addr[0]; 14861 /* The IOCTL status is embedded in the mailbox subheader. */ 14862 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr; 14863 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 14864 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 14865 14866 if ((shdr_status || shdr_add_status) && 14867 (shdr_status != STATUS_FCF_IN_USE)) 14868 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14869 "2558 ADD_FCF_RECORD mailbox failed with " 14870 "status x%x add_status x%x\n", 14871 shdr_status, shdr_add_status); 14872 14873 lpfc_sli4_mbox_cmd_free(phba, mboxq); 14874 } 14875 14876 /** 14877 * lpfc_sli4_add_fcf_record - Manually add an FCF Record. 14878 * @phba: pointer to lpfc hba data structure. 14879 * @fcf_record: pointer to the initialized fcf record to add. 14880 * 14881 * This routine is invoked to manually add a single FCF record. The caller 14882 * must pass a completely initialized FCF_Record. This routine takes 14883 * care of the nonembedded mailbox operations. 
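 *
 * A hedged usage sketch pairing this routine with
 * lpfc_sli4_build_dflt_fcf_record() below; fcf_index is assumed to be the
 * table index chosen by the caller:
 *
 *	struct fcf_record fcf_record;
 *
 *	lpfc_sli4_build_dflt_fcf_record(phba, &fcf_record, fcf_index);
 *	rc = lpfc_sli4_add_fcf_record(phba, &fcf_record);
 *	if (rc)
 *		return rc;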
14884 **/ 14885 int 14886 lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record) 14887 { 14888 int rc = 0; 14889 LPFC_MBOXQ_t *mboxq; 14890 uint8_t *bytep; 14891 void *virt_addr; 14892 dma_addr_t phys_addr; 14893 struct lpfc_mbx_sge sge; 14894 uint32_t alloc_len, req_len; 14895 uint32_t fcfindex; 14896 14897 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14898 if (!mboxq) { 14899 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14900 "2009 Failed to allocate mbox for ADD_FCF cmd\n"); 14901 return -ENOMEM; 14902 } 14903 14904 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) + 14905 sizeof(uint32_t); 14906 14907 /* Allocate DMA memory and set up the non-embedded mailbox command */ 14908 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, 14909 LPFC_MBOX_OPCODE_FCOE_ADD_FCF, 14910 req_len, LPFC_SLI4_MBX_NEMBED); 14911 if (alloc_len < req_len) { 14912 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14913 "2523 Allocated DMA memory size (x%x) is " 14914 "less than the requested DMA memory " 14915 "size (x%x)\n", alloc_len, req_len); 14916 lpfc_sli4_mbox_cmd_free(phba, mboxq); 14917 return -ENOMEM; 14918 } 14919 14920 /* 14921 * Get the first SGE entry from the non-embedded DMA memory. This 14922 * routine only uses a single SGE. 14923 */ 14924 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge); 14925 phys_addr = getPaddr(sge.pa_hi, sge.pa_lo); 14926 virt_addr = mboxq->sge_array->addr[0]; 14927 /* 14928 * Configure the FCF record for FCFI 0. This is the driver's 14929 * hardcoded default and gets used in nonFIP mode. 14930 */ 14931 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record); 14932 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr); 14933 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t)); 14934 14935 /* 14936 * Copy the fcf_index and the FCF Record Data. The data starts after 14937 * the FCoE header plus word10. The data copy needs to be endian 14938 * correct. 14939 */ 14940 bytep += sizeof(uint32_t); 14941 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record)); 14942 mboxq->vport = phba->pport; 14943 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record; 14944 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 14945 if (rc == MBX_NOT_FINISHED) { 14946 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14947 "2515 ADD_FCF_RECORD mailbox failed with " 14948 "status 0x%x\n", rc); 14949 lpfc_sli4_mbox_cmd_free(phba, mboxq); 14950 rc = -EIO; 14951 } else 14952 rc = 0; 14953 14954 return rc; 14955 } 14956 14957 /** 14958 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record. 14959 * @phba: pointer to lpfc hba data structure. 14960 * @fcf_record: pointer to the fcf record to write the default data. 14961 * @fcf_index: FCF table entry index. 14962 * 14963 * This routine is invoked to build the driver's default FCF record. The 14964 * values used are hardcoded. This routine handles memory initialization. 
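 *
 * Note that the VLAN bitmap is indexed bytewise: a valid vlan_id sets bit
 * (vlan_id % 8) of vlan_bitmap[vlan_id / 8], so, for example, vlan_id 10
 * sets bit 2 of vlan_bitmap[1].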
14965 * 14966 **/ 14967 void 14968 lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba, 14969 struct fcf_record *fcf_record, 14970 uint16_t fcf_index) 14971 { 14972 memset(fcf_record, 0, sizeof(struct fcf_record)); 14973 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE; 14974 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER; 14975 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY; 14976 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]); 14977 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]); 14978 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]); 14979 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3); 14980 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4); 14981 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5); 14982 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]); 14983 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]); 14984 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]); 14985 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1); 14986 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1); 14987 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index); 14988 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record, 14989 LPFC_FCF_FPMA | LPFC_FCF_SPMA); 14990 /* Set the VLAN bit map */ 14991 if (phba->valid_vlan) { 14992 fcf_record->vlan_bitmap[phba->vlan_id / 8] 14993 = 1 << (phba->vlan_id % 8); 14994 } 14995 } 14996 14997 /** 14998 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan. 14999 * @phba: pointer to lpfc hba data structure. 15000 * @fcf_index: FCF table entry offset. 15001 * 15002 * This routine is invoked to scan the entire FCF table by reading FCF 15003 * record and processing it one at a time starting from the @fcf_index 15004 * for initial FCF discovery or fast FCF failover rediscovery. 15005 * 15006 * Return 0 if the mailbox command is submitted successfully, none 0 15007 * otherwise. 
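 *
 * A hedged usage sketch for starting an initial scan at the beginning of
 * the FCF table (the error handling is an assumption about the caller):
 *
 *	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
 *	if (rc)
 *		return rc;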
15008 **/ 15009 int 15010 lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) 15011 { 15012 int rc = 0, error; 15013 LPFC_MBOXQ_t *mboxq; 15014 15015 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag; 15016 phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag; 15017 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15018 if (!mboxq) { 15019 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15020 "2000 Failed to allocate mbox for " 15021 "READ_FCF cmd\n"); 15022 error = -ENOMEM; 15023 goto fail_fcf_scan; 15024 } 15025 /* Construct the read FCF record mailbox command */ 15026 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); 15027 if (rc) { 15028 error = -EINVAL; 15029 goto fail_fcf_scan; 15030 } 15031 /* Issue the mailbox command asynchronously */ 15032 mboxq->vport = phba->pport; 15033 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec; 15034 15035 spin_lock_irq(&phba->hbalock); 15036 phba->hba_flag |= FCF_TS_INPROG; 15037 spin_unlock_irq(&phba->hbalock); 15038 15039 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 15040 if (rc == MBX_NOT_FINISHED) 15041 error = -EIO; 15042 else { 15043 /* Reset eligible FCF count for new scan */ 15044 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST) 15045 phba->fcf.eligible_fcf_cnt = 0; 15046 error = 0; 15047 } 15048 fail_fcf_scan: 15049 if (error) { 15050 if (mboxq) 15051 lpfc_sli4_mbox_cmd_free(phba, mboxq); 15052 /* FCF scan failed, clear FCF_TS_INPROG flag */ 15053 spin_lock_irq(&phba->hbalock); 15054 phba->hba_flag &= ~FCF_TS_INPROG; 15055 spin_unlock_irq(&phba->hbalock); 15056 } 15057 return error; 15058 } 15059 15060 /** 15061 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf. 15062 * @phba: pointer to lpfc hba data structure. 15063 * @fcf_index: FCF table entry offset. 15064 * 15065 * This routine is invoked to read an FCF record indicated by @fcf_index 15066 * and to use it for FLOGI roundrobin FCF failover. 15067 * 15068 * Return 0 if the mailbox command is submitted successfully, none 0 15069 * otherwise. 15070 **/ 15071 int 15072 lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) 15073 { 15074 int rc = 0, error; 15075 LPFC_MBOXQ_t *mboxq; 15076 15077 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15078 if (!mboxq) { 15079 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT, 15080 "2763 Failed to allocate mbox for " 15081 "READ_FCF cmd\n"); 15082 error = -ENOMEM; 15083 goto fail_fcf_read; 15084 } 15085 /* Construct the read FCF record mailbox command */ 15086 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); 15087 if (rc) { 15088 error = -EINVAL; 15089 goto fail_fcf_read; 15090 } 15091 /* Issue the mailbox command asynchronously */ 15092 mboxq->vport = phba->pport; 15093 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec; 15094 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 15095 if (rc == MBX_NOT_FINISHED) 15096 error = -EIO; 15097 else 15098 error = 0; 15099 15100 fail_fcf_read: 15101 if (error && mboxq) 15102 lpfc_sli4_mbox_cmd_free(phba, mboxq); 15103 return error; 15104 } 15105 15106 /** 15107 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask. 15108 * @phba: pointer to lpfc hba data structure. 15109 * @fcf_index: FCF table entry offset. 15110 * 15111 * This routine is invoked to read an FCF record indicated by @fcf_index to 15112 * determine whether it's eligible for FLOGI roundrobin failover list. 15113 * 15114 * Return 0 if the mailbox command is submitted successfully, none 0 15115 * otherwise. 
15116 **/ 15117 int 15118 lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) 15119 { 15120 int rc = 0, error; 15121 LPFC_MBOXQ_t *mboxq; 15122 15123 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15124 if (!mboxq) { 15125 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT, 15126 "2758 Failed to allocate mbox for " 15127 "READ_FCF cmd\n"); 15128 error = -ENOMEM; 15129 goto fail_fcf_read; 15130 } 15131 /* Construct the read FCF record mailbox command */ 15132 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); 15133 if (rc) { 15134 error = -EINVAL; 15135 goto fail_fcf_read; 15136 } 15137 /* Issue the mailbox command asynchronously */ 15138 mboxq->vport = phba->pport; 15139 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec; 15140 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 15141 if (rc == MBX_NOT_FINISHED) 15142 error = -EIO; 15143 else 15144 error = 0; 15145 15146 fail_fcf_read: 15147 if (error && mboxq) 15148 lpfc_sli4_mbox_cmd_free(phba, mboxq); 15149 return error; 15150 } 15151 15152 /** 15153 * lpfc_check_next_fcf_pri 15154 * phba pointer to the lpfc_hba struct for this port. 15155 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get 15156 * routine when the rr_bmask is empty. The FCF indecies are put into the 15157 * rr_bmask based on their priority level. Starting from the highest priority 15158 * to the lowest. The most likely FCF candidate will be in the highest 15159 * priority group. When this routine is called it searches the fcf_pri list for 15160 * next lowest priority group and repopulates the rr_bmask with only those 15161 * fcf_indexes. 15162 * returns: 15163 * 1=success 0=failure 15164 **/ 15165 int 15166 lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba) 15167 { 15168 uint16_t next_fcf_pri; 15169 uint16_t last_index; 15170 struct lpfc_fcf_pri *fcf_pri; 15171 int rc; 15172 int ret = 0; 15173 15174 last_index = find_first_bit(phba->fcf.fcf_rr_bmask, 15175 LPFC_SLI4_FCF_TBL_INDX_MAX); 15176 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 15177 "3060 Last IDX %d\n", last_index); 15178 if (list_empty(&phba->fcf.fcf_pri_list)) { 15179 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 15180 "3061 Last IDX %d\n", last_index); 15181 return 0; /* Empty rr list */ 15182 } 15183 next_fcf_pri = 0; 15184 /* 15185 * Clear the rr_bmask and set all of the bits that are at this 15186 * priority. 15187 */ 15188 memset(phba->fcf.fcf_rr_bmask, 0, 15189 sizeof(*phba->fcf.fcf_rr_bmask)); 15190 spin_lock_irq(&phba->hbalock); 15191 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) { 15192 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED) 15193 continue; 15194 /* 15195 * the 1st priority that has not FLOGI failed 15196 * will be the highest. 15197 */ 15198 if (!next_fcf_pri) 15199 next_fcf_pri = fcf_pri->fcf_rec.priority; 15200 spin_unlock_irq(&phba->hbalock); 15201 if (fcf_pri->fcf_rec.priority == next_fcf_pri) { 15202 rc = lpfc_sli4_fcf_rr_index_set(phba, 15203 fcf_pri->fcf_rec.fcf_index); 15204 if (rc) 15205 return 0; 15206 } 15207 spin_lock_irq(&phba->hbalock); 15208 } 15209 /* 15210 * if next_fcf_pri was not set above and the list is not empty then 15211 * we have failed flogis on all of them. So reset flogi failed 15212 * and start at the begining. 15213 */ 15214 if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) { 15215 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) { 15216 fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED; 15217 /* 15218 * the 1st priority that has not FLOGI failed 15219 * will be the highest. 
15220 */ 15221 if (!next_fcf_pri) 15222 next_fcf_pri = fcf_pri->fcf_rec.priority; 15223 spin_unlock_irq(&phba->hbalock); 15224 if (fcf_pri->fcf_rec.priority == next_fcf_pri) { 15225 rc = lpfc_sli4_fcf_rr_index_set(phba, 15226 fcf_pri->fcf_rec.fcf_index); 15227 if (rc) 15228 return 0; 15229 } 15230 spin_lock_irq(&phba->hbalock); 15231 } 15232 } else 15233 ret = 1; 15234 spin_unlock_irq(&phba->hbalock); 15235 15236 return ret; 15237 } 15238 /** 15239 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index 15240 * @phba: pointer to lpfc hba data structure. 15241 * 15242 * This routine is to get the next eligible FCF record index in a round 15243 * robin fashion. If the next eligible FCF record index equals to the 15244 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF) 15245 * shall be returned, otherwise, the next eligible FCF record's index 15246 * shall be returned. 15247 **/ 15248 uint16_t 15249 lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba) 15250 { 15251 uint16_t next_fcf_index; 15252 15253 /* Search start from next bit of currently registered FCF index */ 15254 next_priority: 15255 next_fcf_index = (phba->fcf.current_rec.fcf_indx + 1) % 15256 LPFC_SLI4_FCF_TBL_INDX_MAX; 15257 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, 15258 LPFC_SLI4_FCF_TBL_INDX_MAX, 15259 next_fcf_index); 15260 15261 /* Wrap around condition on phba->fcf.fcf_rr_bmask */ 15262 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 15263 /* 15264 * If we have wrapped then we need to clear the bits that 15265 * have been tested so that we can detect when we should 15266 * change the priority level. 15267 */ 15268 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, 15269 LPFC_SLI4_FCF_TBL_INDX_MAX, 0); 15270 } 15271 15272 15273 /* Check roundrobin failover list empty condition */ 15274 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX || 15275 next_fcf_index == phba->fcf.current_rec.fcf_indx) { 15276 /* 15277 * If next fcf index is not found check if there are lower 15278 * Priority level fcf's in the fcf_priority list. 15279 * Set up the rr_bmask with all of the avaiable fcf bits 15280 * at that level and continue the selection process. 15281 */ 15282 if (lpfc_check_next_fcf_pri_level(phba)) 15283 goto next_priority; 15284 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 15285 "2844 No roundrobin failover FCF available\n"); 15286 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) 15287 return LPFC_FCOE_FCF_NEXT_NONE; 15288 else { 15289 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 15290 "3063 Only FCF available idx %d, flag %x\n", 15291 next_fcf_index, 15292 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag); 15293 return next_fcf_index; 15294 } 15295 } 15296 15297 if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX && 15298 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag & 15299 LPFC_FCF_FLOGI_FAILED) 15300 goto next_priority; 15301 15302 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 15303 "2845 Get next roundrobin failover FCF (x%x)\n", 15304 next_fcf_index); 15305 15306 return next_fcf_index; 15307 } 15308 15309 /** 15310 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index 15311 * @phba: pointer to lpfc hba data structure. 15312 * 15313 * This routine sets the FCF record index in to the eligible bmask for 15314 * roundrobin failover search. It checks to make sure that the index 15315 * does not go beyond the range of the driver allocated bmask dimension 15316 * before setting the bit. 
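 * The @fcf_index argument is the FCF table entry offset whose bit is to be
 * set in the roundrobin bitmask.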
15317 * 15318 * Returns 0 if the index bit successfully set, otherwise, it returns 15319 * -EINVAL. 15320 **/ 15321 int 15322 lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index) 15323 { 15324 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 15325 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 15326 "2610 FCF (x%x) reached driver's book " 15327 "keeping dimension:x%x\n", 15328 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); 15329 return -EINVAL; 15330 } 15331 /* Set the eligible FCF record index bmask */ 15332 set_bit(fcf_index, phba->fcf.fcf_rr_bmask); 15333 15334 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 15335 "2790 Set FCF (x%x) to roundrobin FCF failover " 15336 "bmask\n", fcf_index); 15337 15338 return 0; 15339 } 15340 15341 /** 15342 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index 15343 * @phba: pointer to lpfc hba data structure. 15344 * 15345 * This routine clears the FCF record index from the eligible bmask for 15346 * roundrobin failover search. It checks to make sure that the index 15347 * does not go beyond the range of the driver allocated bmask dimension 15348 * before clearing the bit. 15349 **/ 15350 void 15351 lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index) 15352 { 15353 struct lpfc_fcf_pri *fcf_pri; 15354 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 15355 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 15356 "2762 FCF (x%x) reached driver's book " 15357 "keeping dimension:x%x\n", 15358 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); 15359 return; 15360 } 15361 /* Clear the eligible FCF record index bmask */ 15362 spin_lock_irq(&phba->hbalock); 15363 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) { 15364 if (fcf_pri->fcf_rec.fcf_index == fcf_index) { 15365 list_del_init(&fcf_pri->list); 15366 break; 15367 } 15368 } 15369 spin_unlock_irq(&phba->hbalock); 15370 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask); 15371 15372 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 15373 "2791 Clear FCF (x%x) from roundrobin failover " 15374 "bmask\n", fcf_index); 15375 } 15376 15377 /** 15378 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table 15379 * @phba: pointer to lpfc hba data structure. 15380 * 15381 * This routine is the completion routine for the rediscover FCF table mailbox 15382 * command. If the mailbox command returned failure, it will try to stop the 15383 * FCF rediscover wait timer. 15384 **/ 15385 void 15386 lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) 15387 { 15388 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf; 15389 uint32_t shdr_status, shdr_add_status; 15390 15391 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl; 15392 15393 shdr_status = bf_get(lpfc_mbox_hdr_status, 15394 &redisc_fcf->header.cfg_shdr.response); 15395 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 15396 &redisc_fcf->header.cfg_shdr.response); 15397 if (shdr_status || shdr_add_status) { 15398 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 15399 "2746 Requesting for FCF rediscovery failed " 15400 "status x%x add_status x%x\n", 15401 shdr_status, shdr_add_status); 15402 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) { 15403 spin_lock_irq(&phba->hbalock); 15404 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC; 15405 spin_unlock_irq(&phba->hbalock); 15406 /* 15407 * CVL event triggered FCF rediscover request failed, 15408 * last resort to re-try current registered FCF entry. 
15409 */ 15410 lpfc_retry_pport_discovery(phba); 15411 } else { 15412 spin_lock_irq(&phba->hbalock); 15413 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; 15414 spin_unlock_irq(&phba->hbalock); 15415 /* 15416 * DEAD FCF event triggered FCF rediscover request 15417 * failed, last resort to fail over as a link down 15418 * to FCF registration. 15419 */ 15420 lpfc_sli4_fcf_dead_failthrough(phba); 15421 } 15422 } else { 15423 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 15424 "2775 Start FCF rediscover quiescent timer\n"); 15425 /* 15426 * Start FCF rediscovery wait timer for pending FCF 15427 * before rescan FCF record table. 15428 */ 15429 lpfc_fcf_redisc_wait_start_timer(phba); 15430 } 15431 15432 mempool_free(mbox, phba->mbox_mem_pool); 15433 } 15434 15435 /** 15436 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port. 15437 * @phba: pointer to lpfc hba data structure. 15438 * 15439 * This routine is invoked to request for rediscovery of the entire FCF table 15440 * by the port. 15441 **/ 15442 int 15443 lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba) 15444 { 15445 LPFC_MBOXQ_t *mbox; 15446 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf; 15447 int rc, length; 15448 15449 /* Cancel retry delay timers to all vports before FCF rediscover */ 15450 lpfc_cancel_all_vport_retry_delay_timer(phba); 15451 15452 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15453 if (!mbox) { 15454 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15455 "2745 Failed to allocate mbox for " 15456 "requesting FCF rediscover.\n"); 15457 return -ENOMEM; 15458 } 15459 15460 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) - 15461 sizeof(struct lpfc_sli4_cfg_mhdr)); 15462 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 15463 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF, 15464 length, LPFC_SLI4_MBX_EMBED); 15465 15466 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl; 15467 /* Set count to 0 for invalidating the entire FCF database */ 15468 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0); 15469 15470 /* Issue the mailbox command asynchronously */ 15471 mbox->vport = phba->pport; 15472 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table; 15473 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 15474 15475 if (rc == MBX_NOT_FINISHED) { 15476 mempool_free(mbox, phba->mbox_mem_pool); 15477 return -EIO; 15478 } 15479 return 0; 15480 } 15481 15482 /** 15483 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event 15484 * @phba: pointer to lpfc hba data structure. 15485 * 15486 * This function is the failover routine as a last resort to the FCF DEAD 15487 * event when driver failed to perform fast FCF failover. 15488 **/ 15489 void 15490 lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba) 15491 { 15492 uint32_t link_state; 15493 15494 /* 15495 * Last resort as FCF DEAD event failover will treat this as 15496 * a link down, but save the link state because we don't want 15497 * it to be changed to Link Down unless it is already down. 15498 */ 15499 link_state = phba->link_state; 15500 lpfc_linkdown(phba); 15501 phba->link_state = link_state; 15502 15503 /* Unregister FCF if no devices connected to it */ 15504 lpfc_unregister_unused_fcf(phba); 15505 } 15506 15507 /** 15508 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data. 15509 * @phba: pointer to lpfc hba data structure. 15510 * @rgn23_data: pointer to configure region 23 data. 15511 * 15512 * This function gets SLI3 port configure region 23 data through memory dump 15513 * mailbox command. 
When it successfully retrieves data, the size of the data
 * will be returned, otherwise, 0 will be returned.
 **/
static uint32_t
lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
{
	LPFC_MBOXQ_t *pmb = NULL;
	MAILBOX_t *mb;
	uint32_t offset = 0;
	int rc;

	if (!rgn23_data)
		return 0;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2600 failed to allocate mailbox memory\n");
		return 0;
	}
	mb = &pmb->u.mb;

	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2601 failed to read config "
					"region 23, rc 0x%x Status 0x%x\n",
					rc, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/*
		 * Dump mem may return a zero when finished or we got a
		 * mailbox error; either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;

		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				       rgn23_data + offset,
				       mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);

	mempool_free(pmb, phba->mbox_mem_pool);
	return offset;
}

/**
 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
 * @phba: pointer to lpfc hba data structure.
 * @rgn23_data: pointer to configure region 23 data.
 *
 * This function gets SLI4 port configure region 23 data through memory dump
 * mailbox command. When it successfully retrieves data, the size of the data
 * will be returned, otherwise, 0 will be returned.
 **/
static uint32_t
lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
{
	LPFC_MBOXQ_t *mboxq = NULL;
	struct lpfc_dmabuf *mp = NULL;
	struct lpfc_mqe *mqe;
	uint32_t data_length = 0;
	int rc;

	if (!rgn23_data)
		return 0;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3105 failed to allocate mailbox memory\n");
		return 0;
	}

	if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
		goto out;
	mqe = &mboxq->u.mqe;
	mp = (struct lpfc_dmabuf *) mboxq->context1;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (rc)
		goto out;
	data_length = mqe->un.mb_words[5];
	if (data_length == 0)
		goto out;
	if (data_length > DMP_RGN23_SIZE) {
		data_length = 0;
		goto out;
	}
	lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
out:
	mempool_free(mboxq, phba->mbox_mem_pool);
	if (mp) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}
	return data_length;
}

/**
 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
 * @phba: pointer to lpfc hba data structure.
 *
 * This function reads region 23 and parses the TLV for port status to
 * decide if the user disabled the port. If the TLV indicates the
 * port is disabled, the hba_flag is set accordingly.
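 *
 * As parsed below, a driver specific record begins with a type byte
 * (DRIVER_SPECIFIC_TYPE), a length in words, the LINUX_DRIVER_ID byte, and a
 * reserved byte that must be zero. Within that record, a PORT_STE_TYPE
 * sub-TLV whose third byte is zero marks the port as user disabled.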
15624 **/ 15625 void 15626 lpfc_sli_read_link_ste(struct lpfc_hba *phba) 15627 { 15628 uint8_t *rgn23_data = NULL; 15629 uint32_t if_type, data_size, sub_tlv_len, tlv_offset; 15630 uint32_t offset = 0; 15631 15632 /* Get adapter Region 23 data */ 15633 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL); 15634 if (!rgn23_data) 15635 goto out; 15636 15637 if (phba->sli_rev < LPFC_SLI_REV4) 15638 data_size = lpfc_sli_get_config_region23(phba, rgn23_data); 15639 else { 15640 if_type = bf_get(lpfc_sli_intf_if_type, 15641 &phba->sli4_hba.sli_intf); 15642 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) 15643 goto out; 15644 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data); 15645 } 15646 15647 if (!data_size) 15648 goto out; 15649 15650 /* Check the region signature first */ 15651 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) { 15652 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15653 "2619 Config region 23 has bad signature\n"); 15654 goto out; 15655 } 15656 offset += 4; 15657 15658 /* Check the data structure version */ 15659 if (rgn23_data[offset] != LPFC_REGION23_VERSION) { 15660 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15661 "2620 Config region 23 has bad version\n"); 15662 goto out; 15663 } 15664 offset += 4; 15665 15666 /* Parse TLV entries in the region */ 15667 while (offset < data_size) { 15668 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) 15669 break; 15670 /* 15671 * If the TLV is not driver specific TLV or driver id is 15672 * not linux driver id, skip the record. 15673 */ 15674 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) || 15675 (rgn23_data[offset + 2] != LINUX_DRIVER_ID) || 15676 (rgn23_data[offset + 3] != 0)) { 15677 offset += rgn23_data[offset + 1] * 4 + 4; 15678 continue; 15679 } 15680 15681 /* Driver found a driver specific TLV in the config region */ 15682 sub_tlv_len = rgn23_data[offset + 1] * 4; 15683 offset += 4; 15684 tlv_offset = 0; 15685 15686 /* 15687 * Search for configured port state sub-TLV. 15688 */ 15689 while ((offset < data_size) && 15690 (tlv_offset < sub_tlv_len)) { 15691 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) { 15692 offset += 4; 15693 tlv_offset += 4; 15694 break; 15695 } 15696 if (rgn23_data[offset] != PORT_STE_TYPE) { 15697 offset += rgn23_data[offset + 1] * 4 + 4; 15698 tlv_offset += rgn23_data[offset + 1] * 4 + 4; 15699 continue; 15700 } 15701 15702 /* This HBA contains PORT_STE configured */ 15703 if (!rgn23_data[offset + 2]) 15704 phba->hba_flag |= LINK_DISABLED; 15705 15706 goto out; 15707 } 15708 } 15709 15710 out: 15711 kfree(rgn23_data); 15712 return; 15713 } 15714 15715 /** 15716 * lpfc_wr_object - write an object to the firmware 15717 * @phba: HBA structure that indicates port to create a queue on. 15718 * @dmabuf_list: list of dmabufs to write to the port. 15719 * @size: the total byte value of the objects to write to the port. 15720 * @offset: the current offset to be used to start the transfer. 15721 * 15722 * This routine will create a wr_object mailbox command to send to the port. 15723 * the mailbox command will be constructed using the dma buffers described in 15724 * @dmabuf_list to create a list of BDEs. This routine will fill in as many 15725 * BDEs that the imbedded mailbox can support. The @offset variable will be 15726 * used to indicate the starting offset of the transfer and will also return 15727 * the offset after the write object mailbox has completed. @size is used to 15728 * determine the end of the object and whether the eof bit should be set. 
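 *
 * For example, assuming a 4 KB SLI4_PAGE_SIZE, writing a 10000 byte object
 * from a three-buffer @dmabuf_list posts BDEs of 4096, 4096 and 1808 bytes
 * and sets the eof bit on the last BDE.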
 *
 * Return 0 if successful and @offset will contain the new offset to use
 * for the next write.
 * Return a negative value for error cases.
 **/
int
lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
	       uint32_t size, uint32_t *offset)
{
	struct lpfc_mbx_wr_object *wr_object;
	LPFC_MBOXQ_t *mbox;
	int rc = 0, i = 0;
	uint32_t shdr_status, shdr_add_status;
	uint32_t mbox_tmo;
	union lpfc_sli4_cfg_shdr *shdr;
	struct lpfc_dmabuf *dmabuf;
	uint32_t written = 0;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			LPFC_MBOX_OPCODE_WRITE_OBJECT,
			sizeof(struct lpfc_mbx_wr_object) -
			sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);

	wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
	wr_object->u.request.write_offset = *offset;
	sprintf((uint8_t *)wr_object->u.request.object_name, "/");
	wr_object->u.request.object_name[0] =
		cpu_to_le32(wr_object->u.request.object_name[0]);
	bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
	list_for_each_entry(dmabuf, dmabuf_list, list) {
		if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
			break;
		wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
		wr_object->u.request.bde[i].addrHigh =
			putPaddrHigh(dmabuf->phys);
		if (written + SLI4_PAGE_SIZE >= size) {
			wr_object->u.request.bde[i].tus.f.bdeSize =
				(size - written);
			written += (size - written);
			bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
		} else {
			wr_object->u.request.bde[i].tus.f.bdeSize =
				SLI4_PAGE_SIZE;
			written += SLI4_PAGE_SIZE;
		}
		i++;
	}
	wr_object->u.request.bde_count = i;
	bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *) &wr_object->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		mempool_free(mbox, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3025 Write Object mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	} else
		*offset += wr_object->u.response.actual_write_length;
	return rc;
}

/**
 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
 * @vport: pointer to vport data structure.
 *
 * This function iterates through the mailboxq and cleans up all REG_LOGIN
 * and REG_VPI mailbox commands associated with the vport. This function
 * is called when the driver wants to restart discovery of the vport due to
 * a Clear Virtual Link event.
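 * Queued REG_LOGIN/REG_VPI mailboxes for the vport are moved off the mailbox
 * queue and released, while a matching active mailbox is given the default
 * completion handler and flagged LPFC_MBX_IMED_UNREG so that its RPI is
 * unregistered when the command completes.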
15813 **/ 15814 void 15815 lpfc_cleanup_pending_mbox(struct lpfc_vport *vport) 15816 { 15817 struct lpfc_hba *phba = vport->phba; 15818 LPFC_MBOXQ_t *mb, *nextmb; 15819 struct lpfc_dmabuf *mp; 15820 struct lpfc_nodelist *ndlp; 15821 struct lpfc_nodelist *act_mbx_ndlp = NULL; 15822 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 15823 LIST_HEAD(mbox_cmd_list); 15824 uint8_t restart_loop; 15825 15826 /* Clean up internally queued mailbox commands with the vport */ 15827 spin_lock_irq(&phba->hbalock); 15828 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { 15829 if (mb->vport != vport) 15830 continue; 15831 15832 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) && 15833 (mb->u.mb.mbxCommand != MBX_REG_VPI)) 15834 continue; 15835 15836 list_del(&mb->list); 15837 list_add_tail(&mb->list, &mbox_cmd_list); 15838 } 15839 /* Clean up active mailbox command with the vport */ 15840 mb = phba->sli.mbox_active; 15841 if (mb && (mb->vport == vport)) { 15842 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) || 15843 (mb->u.mb.mbxCommand == MBX_REG_VPI)) 15844 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 15845 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 15846 act_mbx_ndlp = (struct lpfc_nodelist *)mb->context2; 15847 /* Put reference count for delayed processing */ 15848 act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp); 15849 /* Unregister the RPI when mailbox complete */ 15850 mb->mbox_flag |= LPFC_MBX_IMED_UNREG; 15851 } 15852 } 15853 /* Cleanup any mailbox completions which are not yet processed */ 15854 do { 15855 restart_loop = 0; 15856 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) { 15857 /* 15858 * If this mailox is already processed or it is 15859 * for another vport ignore it. 15860 */ 15861 if ((mb->vport != vport) || 15862 (mb->mbox_flag & LPFC_MBX_IMED_UNREG)) 15863 continue; 15864 15865 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) && 15866 (mb->u.mb.mbxCommand != MBX_REG_VPI)) 15867 continue; 15868 15869 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 15870 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 15871 ndlp = (struct lpfc_nodelist *)mb->context2; 15872 /* Unregister the RPI when mailbox complete */ 15873 mb->mbox_flag |= LPFC_MBX_IMED_UNREG; 15874 restart_loop = 1; 15875 spin_unlock_irq(&phba->hbalock); 15876 spin_lock(shost->host_lock); 15877 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; 15878 spin_unlock(shost->host_lock); 15879 spin_lock_irq(&phba->hbalock); 15880 break; 15881 } 15882 } 15883 } while (restart_loop); 15884 15885 spin_unlock_irq(&phba->hbalock); 15886 15887 /* Release the cleaned-up mailbox commands */ 15888 while (!list_empty(&mbox_cmd_list)) { 15889 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list); 15890 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 15891 mp = (struct lpfc_dmabuf *) (mb->context1); 15892 if (mp) { 15893 __lpfc_mbuf_free(phba, mp->virt, mp->phys); 15894 kfree(mp); 15895 } 15896 ndlp = (struct lpfc_nodelist *) mb->context2; 15897 mb->context2 = NULL; 15898 if (ndlp) { 15899 spin_lock(shost->host_lock); 15900 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; 15901 spin_unlock(shost->host_lock); 15902 lpfc_nlp_put(ndlp); 15903 } 15904 } 15905 mempool_free(mb, phba->mbox_mem_pool); 15906 } 15907 15908 /* Release the ndlp with the cleaned-up active mailbox command */ 15909 if (act_mbx_ndlp) { 15910 spin_lock(shost->host_lock); 15911 act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; 15912 spin_unlock(shost->host_lock); 15913 lpfc_nlp_put(act_mbx_ndlp); 15914 } 15915 } 15916 15917 /** 15918 * lpfc_drain_txq - Drain the txq 15919 * @phba: Pointer to HBA context object. 
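 *
 * Returns the number of IOCBs still left on the txq when this routine
 * completes.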
 *
 * This function attempts to submit IOCBs on the txq
 * to the adapter. For SLI4 adapters, the txq contains
 * ELS IOCBs that have been deferred because there
 * are no SGLs. This congestion can occur with large
 * vport counts during node discovery.
 **/

uint32_t
lpfc_drain_txq(struct lpfc_hba *phba)
{
	LIST_HEAD(completions);
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
	struct lpfc_iocbq *piocbq = NULL;
	unsigned long iflags = 0;
	char *fail_msg = NULL;
	struct lpfc_sglq *sglq;
	union lpfc_wqe wqe;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (pring->txq_cnt > pring->txq_max)
		pring->txq_max = pring->txq_cnt;

	spin_unlock_irqrestore(&phba->hbalock, iflags);

	while (pring->txq_cnt) {
		spin_lock_irqsave(&phba->hbalock, iflags);

		piocbq = lpfc_sli_ringtx_get(phba, pring);
		if (!piocbq) {
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2823 txq empty and txq_cnt is %d\n ",
				pring->txq_cnt);
			break;
		}
		sglq = __lpfc_sli_get_sglq(phba, piocbq);
		if (!sglq) {
			__lpfc_sli_ringtx_put(phba, pring, piocbq);
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			break;
		}

		/* The xri and iocb resources secured,
		 * attempt to issue request
		 */
		piocbq->sli4_lxritag = sglq->sli4_lxritag;
		piocbq->sli4_xritag = sglq->sli4_xritag;
		if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
			fail_msg = "to convert bpl to sgl";
		else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
			fail_msg = "to convert iocb to wqe";
		else if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
			fail_msg = " - Wq is full";
		else
			lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);

		if (fail_msg) {
			/* Failed means we can't issue and need to cancel */
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2822 IOCB failed %s iotag 0x%x "
					"xri 0x%x\n",
					fail_msg,
					piocbq->iotag, piocbq->sli4_xritag);
			list_add_tail(&piocbq->list, &completions);
		}
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}

	/* Cancel all the IOCBs that cannot be issued */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);

	return pring->txq_cnt;
}