/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/aer.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"

/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB,
	LPFC_UNSOL_IOCB,
	LPFC_SOL_IOCB,
	LPFC_ABORT_IOCB
} lpfc_iocb_type;


/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
				  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
			      uint8_t *, uint32_t *);
static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
							 struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
				      struct hbq_dmabuf *);
static int lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *, struct lpfc_queue *,
				    struct lpfc_cqe *);

static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
	return &iocbq->iocb;
}

/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work Queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful. If no entries are available on @q then this function will return
 * -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static uint32_t
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
{
	union lpfc_wqe *temp_wqe;
	struct lpfc_register doorbell;
	uint32_t host_index;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_wqe = q->qe[q->host_index].wqe;

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	/* set consumption flag every once in a while */
	if (!((q->host_index + 1) % q->entry_repost))
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
	if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
		bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
	lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);

	/* Update the host index before invoking device */
	host_index = q->host_index;
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_wq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_wq_doorbell_index, &doorbell, host_index);
	bf_set(lpfc_wq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.WQDBregaddr);
	readl(q->phba->sli4_hba.WQDBregaddr); /* Flush */

	return 0;
}

/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
	uint32_t released = 0;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	if (q->hba_index == index)
		return 0;
	do {
		q->hba_index = ((q->hba_index + 1) % q->entry_count);
		released++;
	} while (q->hba_index != index);
	return released;
}

/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the Mailbox Queue Entry. This function returns
 * 0 if successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static uint32_t
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
	struct lpfc_mqe *temp_mqe;
	struct lpfc_register doorbell;
	uint32_t host_index;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_mqe = q->qe[q->host_index].mqe;

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
	/* Save off the mailbox pointer for completion */
	q->phba->mbox = (MAILBOX_t *)temp_mqe;

	/* Update the host index before invoking device */
	host_index = q->host_index;
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
	readl(q->phba->sli4_hba.MQDBregaddr); /* Flush */
	return 0;
}

/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	/* Clear the mailbox pointer for completion */
	q->phba->mbox = NULL;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return 1;
}

/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA then this routine will return
 * NULL.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
	struct lpfc_eqe *eqe;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	eqe = q->qe[q->hba_index].eqe;

	/* If the next EQE is not valid then we are done */
	if (!bf_get_le32(lpfc_eqe_valid, eqe))
		return NULL;
	/* If the host has not yet processed the next entry then we are done */
	if (((q->hba_index + 1) % q->entry_count) == q->host_index)
		return NULL;

	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return eqe;
}

/**
 * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
 * @q: The Event Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will mark all Event Queue Entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each event queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
 * The internal host index in the @q will be updated by this routine to
 * indicate that the host has finished processing the entries. The @arm
 * parameter indicates that the queue should be rearmed when ringing the
 * doorbell.
 *
 * This function will return the number of EQEs that were popped.
 **/
uint32_t
lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_eqe *temp_eqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		temp_eqe = q->qe[q->host_index].eqe;
		bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm) {
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	}
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQCQDBregaddr);
	return released;
}

/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q, update
 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 * processed, but not popped back to the HBA then this routine will return
 * NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
	struct lpfc_cqe *cqe;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;

	/* If the next CQE is not valid then we are done */
	if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
		return NULL;
	/* If the host has not yet processed the next entry then we are done */
	if (((q->hba_index + 1) % q->entry_count) == q->host_index)
		return NULL;

	cqe = q->qe[q->hba_index].cqe;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return cqe;
}

/**
 * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will mark all Completion queue entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each completion queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
 * The internal host index in the @q will be updated by this routine to
 * indicate that the host has finished processing the entries. The @arm
 * parameter indicates that the queue should be rearmed when ringing the
 * doorbell.
 *
 * This function will return the number of CQEs that were released.
 **/
uint32_t
lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_cqe *temp_qe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;
	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		temp_qe = q->qe[q->host_index].cqe;
		bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
	bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
	       (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
	return released;
}

/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The header Receive Queue Entry to put on the header Receive Queue.
 * @drqe: The data Receive Queue Entry to put on the data Receive Queue.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next
 * available entries on @hq and @dq. This function will then ring the Receive
 * Queue Doorbell to signal the HBA to start processing the Receive Queue
 * Entries. This function returns the index that the rqe was copied to if
 * successful. If no entries are available on @hq then this function will
 * return -EBUSY.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
	struct lpfc_rqe *temp_hrqe;
	struct lpfc_rqe *temp_drqe;
	struct lpfc_register doorbell;
	int put_index = hq->host_index;

	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return -ENOMEM;
	temp_hrqe = hq->qe[hq->host_index].rqe;
	temp_drqe = dq->qe[dq->host_index].rqe;

	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
		return -EINVAL;
	if (hq->host_index != dq->host_index)
		return -EINVAL;
	/* If the host has not yet processed the next entry then we are done */
	if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index)
		return -EBUSY;
	lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
	lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

	/* Update the host index to point to the next slot */
	hq->host_index = ((hq->host_index + 1) % hq->entry_count);
	dq->host_index = ((dq->host_index + 1) % dq->entry_count);

	/* Ring The Header Receive Queue Doorbell */
	if (!(hq->host_index % hq->entry_repost)) {
		doorbell.word0 = 0;
		bf_set(lpfc_rq_doorbell_num_posted, &doorbell,
		       hq->entry_repost);
		bf_set(lpfc_rq_doorbell_id, &doorbell, hq->queue_id);
		writel(doorbell.word0, hq->phba->sli4_hba.RQDBregaddr);
	}
	return put_index;
}

/**
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return 0;

	if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
		return 0;
	hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
	dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
	return 1;
}

/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads from consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->cmdringaddr) +
			   pring->cmdidx * phba->iocb_cmd_size);
}

/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->rspringaddr) +
			   pring->rspidx * phba->iocb_rsp_size);
}

/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
static struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq *iocbq = NULL;

	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
	if (iocbq)
		phba->iocb_cnt++;
	if (phba->iocb_cnt > phba->iocb_max)
		phba->iocb_max = phba->iocb_cnt;
	return iocbq;
}

/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
static struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
	return sglq;
}

/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	return sglq;
}

/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 **/
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
		    uint16_t xritag,
		    struct lpfc_node_rrq *rrq)
{
	struct lpfc_nodelist *ndlp = NULL;

	if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
		ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

	/* The target DID could have been swapped (cable swap)
	 * we should use the ndlp from the findnode if it is
	 * available.
	 */
	if ((!ndlp) && rrq->ndlp)
		ndlp = rrq->ndlp;

	if (!ndlp)
		goto out;

	if (test_and_clear_bit(xritag, ndlp->active_rrqs.xri_bitmap)) {
		rrq->send_rrq = 0;
		rrq->xritag = 0;
		rrq->rrq_stop_time = 0;
	}
out:
	mempool_free(rrq, phba->rrq_pool);
}

/**
 * lpfc_handle_rrq_active - Checks if RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held; it takes the hbalock
 * internally. It checks if stop_time (ratov from setting rrq active) has
 * been reached. If it has and the send_rrq flag is set then
 * it will call lpfc_send_rrq. If the send_rrq flag is not set
 * then it will just call the routine to clear the rrq and
 * free the rrq resource.
 * The timer is set to the next rrq that is going to expire before
 * leaving the routine.
 *
 **/
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long next_time;
	unsigned long iflags;
	LIST_HEAD(send_rrq);

	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	next_time = jiffies + HZ * (phba->fc_ratov + 1);
	list_for_each_entry_safe(rrq, nextrrq,
				 &phba->active_rrq_list, list) {
		if (time_after(jiffies, rrq->rrq_stop_time))
			list_move(&rrq->list, &send_rrq);
		else if (time_before(rrq->rrq_stop_time, next_time))
			next_time = rrq->rrq_stop_time;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	if (!list_empty(&phba->active_rrq_list))
		mod_timer(&phba->rrq_tmr, next_time);
	list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
		list_del(&rrq->list);
		if (!rrq->send_rrq)
			/* this call will free the rrq */
			lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
		else if (lpfc_send_rrq(phba, rrq)) {
			/* if we send the rrq then the completion handler
			 * will clear the bit in the xribitmap.
			 */
			lpfc_clr_rrq_active(phba, rrq->xritag,
					    rrq);
		}
	}
}

/**
 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
 * @vport: Pointer to vport context object.
 * @xri: The xri used in the exchange.
 * @did: The target's DID for this exchange.
 *
 * returns NULL = rrq not found in the phba->active_rrq_list.
 *         rrq = rrq for this xri and target.
 **/
struct lpfc_node_rrq *
lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;

	if (phba->sli_rev != LPFC_SLI_REV4)
		return NULL;
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
		if (rrq->vport == vport && rrq->xritag == xri &&
		    rrq->nlp_DID == did) {
			list_del(&rrq->list);
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			return rrq;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return NULL;
}

/**
 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
 * @vport: Pointer to vport context object.
 * @ndlp: Pointer to the lpfc_nodelist structure.
 * If ndlp is NULL, remove all active RRQs for this vport from the
 * phba->active_rrq_list and clear the rrq.
 * If ndlp is not NULL then only remove rrqs for this vport & this ndlp.
 **/
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;
	LIST_HEAD(rrq_list);

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;
	if (!ndlp) {
		lpfc_sli4_vport_delete_els_xri_aborted(vport);
		lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
	}
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
		if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
			list_move(&rrq->list, &rrq_list);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
		list_del(&rrq->list);
		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
	}
}

/**
 * lpfc_cleanup_wt_rrqs - Remove all rrq's from the active list.
 * @phba: Pointer to HBA context object.
 *
 * Remove all rrqs from the phba->active_rrq_list and free them by
 * calling lpfc_clr_rrq_active.
 *
 **/
void
lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba)
{
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long next_time;
	unsigned long iflags;
	LIST_HEAD(rrq_list);

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;
	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	next_time = jiffies + HZ * (phba->fc_ratov * 2);
	list_splice_init(&phba->active_rrq_list, &rrq_list);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
		list_del(&rrq->list);
		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
	}
	if (!list_empty(&phba->active_rrq_list))
		mod_timer(&phba->rrq_tmr, next_time);
}


/**
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Target's nodelist pointer for this exchange.
 * @xritag: The xri in the bitmap to test.
 *
 * This function is called with hbalock held. This function
 * returns 0 = rrq not active for this xri
 *         1 = rrq is valid for this xri.
 **/
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		     uint16_t xritag)
{
	if (!ndlp)
		return 0;
	if (test_bit(xritag, ndlp->active_rrqs.xri_bitmap))
		return 1;
	else
		return 0;
}

/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * returns  0 rrq activated for this xri
 *         < 0 No memory or invalid ndlp.
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
	unsigned long iflags;
	struct lpfc_node_rrq *rrq;
	int empty;

	if (!ndlp)
		return -EINVAL;

	if (!phba->cfg_enable_rrq)
		return -EINVAL;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->pport->load_flag & FC_UNLOADING) {
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
		goto out;
	}

	/*
	 * set the active bit even if there is no mem available.
	 */
	if (NLP_CHK_FREE_REQ(ndlp))
		goto out;

	if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
		goto out;

	if (test_and_set_bit(xritag, ndlp->active_rrqs.xri_bitmap))
		goto out;

	spin_unlock_irqrestore(&phba->hbalock, iflags);
	rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
	if (!rrq) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
				" DID:0x%x Send:%d\n",
				xritag, rxid, ndlp->nlp_DID, send_rrq);
		return -EINVAL;
	}
	rrq->send_rrq = send_rrq;
	rrq->xritag = xritag;
	rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1);
	rrq->ndlp = ndlp;
	rrq->nlp_DID = ndlp->nlp_DID;
	rrq->vport = ndlp->vport;
	rrq->rxid = rxid;
	rrq->send_rrq = send_rrq;
	spin_lock_irqsave(&phba->hbalock, iflags);
	empty = list_empty(&phba->active_rrq_list);
	list_add_tail(&rrq->list, &phba->active_rrq_list);
	phba->hba_flag |= HBA_RRQ_ACTIVE;
	if (empty)
		lpfc_worker_wake_up(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return 0;
out:
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2921 Can't set rrq active xri:0x%x rxid:0x%x"
			" DID:0x%x Send:%d\n",
			xritag, rxid, ndlp->nlp_DID, send_rrq);
	return -EINVAL;
}

/**
 * __lpfc_sli_get_sglq - Allocates a sglq object from the sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with hbalock held. This function
 * gets a new driver sglq object from the sglq list. If the
 * list is not empty then it is successful and it returns pointer to the newly
 * allocated sglq object else it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list;
	struct lpfc_sglq *sglq = NULL;
	struct lpfc_sglq *start_sglq = NULL;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct lpfc_nodelist *ndlp;
	int found = 0;

	if (piocbq->iocb_flag & LPFC_IO_FCP) {
		lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1;
		ndlp = lpfc_cmd->rdata->pnode;
	} else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
			!(piocbq->iocb_flag & LPFC_IO_LIBDFC))
		ndlp = piocbq->context_un.ndlp;
	else
		ndlp = piocbq->context1;

	list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list);
	start_sglq = sglq;
	while (!found) {
		if (!sglq)
			return NULL;
		if (lpfc_test_rrq_active(phba, ndlp, sglq->sli4_xritag)) {
			/* This xri has an rrq outstanding for this DID.
			 * put it back in the list and get another xri.
			 */
			list_add_tail(&sglq->list, lpfc_sgl_list);
			sglq = NULL;
			list_remove_head(lpfc_sgl_list, sglq,
					 struct lpfc_sglq, list);
			if (sglq == start_sglq) {
				sglq = NULL;
				break;
			} else
				continue;
		}
		sglq->ndlp = ndlp;
		found = 1;
		phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
		sglq->state = SGL_ALLOCATED;
	}
	return sglq;
}

/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	iocbq = __lpfc_sli_get_iocbq(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return iocbq;
}

/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sglq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_sgl_list).
 **/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_sglq *sglq;
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
	unsigned long iflag = 0;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];

	if (iocbq->sli4_xritag == NO_XRI)
		sglq = NULL;
	else
		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);

	if (sglq) {
		if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
		    (sglq->state != SGL_XRI_ABORTED)) {
			spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
					  iflag);
			list_add(&sglq->list,
				 &phba->sli4_hba.lpfc_abts_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.abts_sgl_list_lock, iflag);
		} else {
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_sgl_list);

			/* Check if TXQ queue needs to be serviced */
			if (pring->txq_cnt)
				lpfc_worker_wake_up(phba);
		}
	}


	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_lxritag = NO_XRI;
	iocbq->sli4_xritag = NO_XRI;
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}


/**
 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_xritag = NO_XRI;
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	phba->__lpfc_sli_release_iocbq(phba, iocbq);
	phba->iocb_cnt--;
}

/**
 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with no lock held to release the iocb to
 * iocb pool.
 **/
void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	unsigned long iflags;

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli_release_iocbq(phba, iocbq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}

/**
 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
 * @phba: Pointer to HBA context object.
 * @iocblist: List of IOCBs.
 * @ulpstatus: ULP status in IOCB command field.
 * @ulpWord4: ULP word-4 in IOCB command field.
 *
 * This function is called with a list of IOCBs to cancel. It cancels the IOCB
 * on the list by invoking the complete callback function associated with the
 * IOCB with the provided @ulpstatus and @ulpWord4 set to the IOCB command
 * fields.
 **/
void
lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
		      uint32_t ulpstatus, uint32_t ulpWord4)
{
	struct lpfc_iocbq *piocb;

	while (!list_empty(iocblist)) {
		list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);

		if (!piocb->iocb_cmpl)
			lpfc_sli_release_iocbq(phba, piocb);
		else {
			piocb->iocb.ulpStatus = ulpstatus;
			piocb->iocb.un.ulpWord[4] = ulpWord4;
			(piocb->iocb_cmpl) (phba, piocb, piocb);
		}
	}
	return;
}

/**
 * lpfc_sli_iocb_cmd_type - Get the iocb type
 * @iocb_cmnd: iocb command code.
 *
 * This function is called by the ring event handler function to get the iocb
 * type. This function translates the iocb command to an iocb command type
 * used to decide the final disposition of each completed IOCB.
 * The function returns
 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
 * LPFC_SOL_IOCB     if it is a solicited iocb completion
 * LPFC_ABORT_IOCB   if it is an abort iocb
 * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
 *
 * The caller is not required to hold any lock.
 **/
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
	lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;

	if (iocb_cmnd > CMD_MAX_IOCB_CMD)
		return 0;

	switch (iocb_cmnd) {
	case CMD_XMIT_SEQUENCE_CR:
	case CMD_XMIT_SEQUENCE_CX:
	case CMD_XMIT_BCAST_CN:
	case CMD_XMIT_BCAST_CX:
	case CMD_ELS_REQUEST_CR:
	case CMD_ELS_REQUEST_CX:
	case CMD_CREATE_XRI_CR:
	case CMD_CREATE_XRI_CX:
	case CMD_GET_RPI_CN:
	case CMD_XMIT_ELS_RSP_CX:
	case CMD_GET_RPI_CR:
	case CMD_FCP_IWRITE_CR:
	case CMD_FCP_IWRITE_CX:
	case CMD_FCP_IREAD_CR:
	case CMD_FCP_IREAD_CX:
	case CMD_FCP_ICMND_CR:
	case CMD_FCP_ICMND_CX:
	case CMD_FCP_TSEND_CX:
	case CMD_FCP_TRSP_CX:
	case CMD_FCP_TRECEIVE_CX:
	case CMD_FCP_AUTO_TRSP_CX:
	case CMD_ADAPTER_MSG:
	case CMD_ADAPTER_DUMP:
	case CMD_XMIT_SEQUENCE64_CR:
	case CMD_XMIT_SEQUENCE64_CX:
	case CMD_XMIT_BCAST64_CN:
	case CMD_XMIT_BCAST64_CX:
	case CMD_ELS_REQUEST64_CR:
	case CMD_ELS_REQUEST64_CX:
	case CMD_FCP_IWRITE64_CR:
	case CMD_FCP_IWRITE64_CX:
	case CMD_FCP_IREAD64_CR:
	case CMD_FCP_IREAD64_CX:
	case CMD_FCP_ICMND64_CR:
	case CMD_FCP_ICMND64_CX:
	case CMD_FCP_TSEND64_CX:
	case CMD_FCP_TRSP64_CX:
	case CMD_FCP_TRECEIVE64_CX:
	case CMD_GEN_REQUEST64_CR:
	case CMD_GEN_REQUEST64_CX:
	case CMD_XMIT_ELS_RSP64_CX:
	case DSSCMD_IWRITE64_CR:
	case DSSCMD_IWRITE64_CX:
	case DSSCMD_IREAD64_CR:
	case DSSCMD_IREAD64_CX:
		type = LPFC_SOL_IOCB;
		break;
	case CMD_ABORT_XRI_CN:
	case CMD_ABORT_XRI_CX:
	case CMD_CLOSE_XRI_CN:
	case CMD_CLOSE_XRI_CX:
	case CMD_XRI_ABORTED_CX:
	case CMD_ABORT_MXRI64_CN:
	case CMD_XMIT_BLS_RSP64_CX:
		type = LPFC_ABORT_IOCB;
		break;
	case CMD_RCV_SEQUENCE_CX:
	case CMD_RCV_ELS_REQ_CX:
	case CMD_RCV_SEQUENCE64_CX:
	case CMD_RCV_ELS_REQ64_CX:
	case CMD_ASYNC_STATUS:
	case CMD_IOCB_RCV_SEQ64_CX:
	case CMD_IOCB_RCV_ELS64_CX:
	case CMD_IOCB_RCV_CONT64_CX:
	case CMD_IOCB_RET_XRI64_CX:
		type = LPFC_UNSOL_IOCB;
		break;
	case CMD_IOCB_XMIT_MSEQ64_CR:
	case CMD_IOCB_XMIT_MSEQ64_CX:
	case CMD_IOCB_RCV_SEQ_LIST64_CX:
	case CMD_IOCB_RCV_ELS_LIST64_CX:
	case CMD_IOCB_CLOSE_EXTENDED_CN:
	case CMD_IOCB_ABORT_EXTENDED_CN:
	case CMD_IOCB_RET_HBQE64_CN:
	case CMD_IOCB_FCP_IBIDIR64_CR:
	case CMD_IOCB_FCP_IBIDIR64_CX:
	case CMD_IOCB_FCP_ITASKMGT64_CX:
	case CMD_IOCB_LOGENTRY_CN:
	case CMD_IOCB_LOGENTRY_ASYNC_CN:
		printk("%s - Unhandled SLI-3 Command x%x\n",
		       __func__, iocb_cmnd);
		type = LPFC_UNKNOWN_IOCB;
		break;
	default:
		type = LPFC_UNKNOWN_IOCB;
		break;
	}

	return type;
}

/**
 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
 * @phba: Pointer to HBA context object.
 *
 * This function is called from SLI initialization code
 * to configure every ring of the HBA's SLI interface. The
 * caller is not required to hold any lock. This function issues
 * a config_ring mailbox command for each ring.
 * This function returns zero if successful else returns a negative
 * error code.
 **/
static int
lpfc_sli_ring_map(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *pmbox;
	int i, rc, ret = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;
	pmbox = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;
	for (i = 0; i < psli->num_rings; i++) {
		lpfc_config_ring(phba, i, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0446 Adapter failed to init (%d), "
					"mbxCmd x%x CFG_RING, mbxStatus x%x, "
					"ring %d\n",
					rc, pmbox->mbxCommand,
					pmbox->mbxStatus, i);
			phba->link_state = LPFC_HBA_ERROR;
			ret = -ENXIO;
			break;
		}
	}
	mempool_free(pmb, phba->mbox_mem_pool);
	return ret;
}

/**
 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to the driver iocb object.
 *
 * This function is called with hbalock held. The function adds the
 * new iocb to txcmplq of the given ring. This function always returns
 * 0. If this function is called for ELS ring, this function checks if
 * there is a vport associated with the ELS command. This function also
 * starts els_tmofunc timer if this is an ELS command.
 **/
static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocb)
{
	list_add_tail(&piocb->list, &pring->txcmplq);
	piocb->iocb_flag |= LPFC_IO_ON_Q;
	pring->txcmplq_cnt++;
	if (pring->txcmplq_cnt > pring->txcmplq_max)
		pring->txcmplq_max = pring->txcmplq_cnt;

	if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
	    (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
	    (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
		if (!piocb->vport)
			BUG();
		else
			mod_timer(&piocb->vport->els_tmofunc,
				  jiffies + HZ * (phba->fc_ratov << 1));
	}


	return 0;
}

/**
 * lpfc_sli_ringtx_get - Get first element of the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to get next
 * iocb in txq of the given ring. If there is any iocb in
 * the txq, the function returns first iocb in the list after
 * removing the iocb from the list, else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_iocbq *cmd_iocb;

	list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
	if (cmd_iocb != NULL)
		pring->txq_cnt--;
	return cmd_iocb;
}

/**
 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held and the caller must post the
 * iocb without releasing the lock. If the caller releases the lock,
 * iocb slot returned by the function is not guaranteed to be available.
 * The function returns pointer to the next available iocb slot if there
 * is available slot in the ring, else it returns NULL.
 * If the get index of the ring is ahead of the put index, the function
 * will post an error attention event to the worker thread to take the
 * HBA to offline state.
 **/
static IOCB_t *
lpfc_sli_next_iocb_slot(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	uint32_t max_cmd_idx = pring->numCiocb;

	if ((pring->next_cmdidx == pring->cmdidx) &&
	    (++pring->next_cmdidx >= max_cmd_idx))
		pring->next_cmdidx = 0;

	if (unlikely(pring->local_getidx == pring->next_cmdidx)) {

		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);

		if (unlikely(pring->local_getidx >= max_cmd_idx)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0315 Ring %d issue: portCmdGet %d "
					"is bigger than cmd ring %d\n",
					pring->ringno,
					pring->local_getidx, max_cmd_idx);

			phba->link_state = LPFC_HBA_ERROR;
			/*
			 * All error attention handlers are posted to
			 * worker thread
			 */
			phba->work_ha |= HA_ERATT;
			phba->work_hs = HS_FFER3;

			lpfc_worker_wake_up(phba);

			return NULL;
		}

		if (pring->local_getidx == pring->next_cmdidx)
			return NULL;
	}

	return lpfc_cmd_iocb(phba, pring);
}

/**
 * lpfc_sli_next_iotag - Get an iotag for the iocb
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function gets an iotag for the iocb. If there is no unused iotag and
 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iocbq_lookup
 * array and assigns a new iotag.
 * The function returns the allocated iotag if successful, else returns zero.
 * Zero is not a valid iotag.
 * The caller is not required to hold any lock.
 **/
uint16_t
lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_iocbq **new_arr;
	struct lpfc_iocbq **old_arr;
	size_t new_len;
	struct lpfc_sli *psli = &phba->sli;
	uint16_t iotag;

	spin_lock_irq(&phba->hbalock);
	iotag = psli->last_iotag;
	if (++iotag < psli->iocbq_lookup_len) {
		psli->last_iotag = iotag;
		psli->iocbq_lookup[iotag] = iocbq;
		spin_unlock_irq(&phba->hbalock);
		iocbq->iotag = iotag;
		return iotag;
	} else if (psli->iocbq_lookup_len < (0xffff
					     - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
		new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
		spin_unlock_irq(&phba->hbalock);
		new_arr = kzalloc(new_len * sizeof(struct lpfc_iocbq *),
				  GFP_KERNEL);
		if (new_arr) {
			spin_lock_irq(&phba->hbalock);
			old_arr = psli->iocbq_lookup;
			if (new_len <= psli->iocbq_lookup_len) {
				/* highly improbable case */
				kfree(new_arr);
				iotag = psli->last_iotag;
				if (++iotag < psli->iocbq_lookup_len) {
					psli->last_iotag = iotag;
					psli->iocbq_lookup[iotag] = iocbq;
					spin_unlock_irq(&phba->hbalock);
					iocbq->iotag = iotag;
					return iotag;
				}
				spin_unlock_irq(&phba->hbalock);
				return 0;
			}
			if (psli->iocbq_lookup)
				memcpy(new_arr, old_arr,
				       ((psli->last_iotag + 1) *
					sizeof(struct lpfc_iocbq *)));
			psli->iocbq_lookup = new_arr;
			psli->iocbq_lookup_len = new_len;
			psli->last_iotag = iotag;
			psli->iocbq_lookup[iotag] = iocbq;
			spin_unlock_irq(&phba->hbalock);
			iocbq->iotag = iotag;
			kfree(old_arr);
			return iotag;
		}
	} else
		spin_unlock_irq(&phba->hbalock);

	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			"0318 Failed to allocate IOTAG.last IOTAG is %d\n",
			psli->last_iotag);

	return 0;
}

/**
 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iocb: Pointer to iocb slot in the ring.
 * @nextiocb: Pointer to driver iocb object which needs to be
 *            posted to firmware.
 *
 * This function is called with hbalock held to post a new iocb to
 * the firmware. This function copies the new iocb to ring iocb slot and
 * updates the ring pointers. It adds the new iocb to txcmplq if there is
 * a completion call back for this iocb else the function will free the
 * iocb object.
 **/
static void
lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		     IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
{
	/*
	 * Set up an iotag
	 */
	nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;


	if (pring->ringno == LPFC_ELS_RING) {
		lpfc_debugfs_slow_ring_trc(phba,
			"IOCB cmd ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
			*(((uint32_t *) &nextiocb->iocb) + 4),
			*(((uint32_t *) &nextiocb->iocb) + 6),
			*(((uint32_t *) &nextiocb->iocb) + 7));
	}

	/*
	 * Issue iocb command to adapter
	 */
	lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
	wmb();
	pring->stats.iocb_cmd++;

	/*
	 * If there is no completion routine to call, we can release the
	 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
	 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
	 */
	if (nextiocb->iocb_cmpl)
		lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
	else
		__lpfc_sli_release_iocbq(phba, nextiocb);

	/*
	 * Let the HBA know what IOCB slot will be the next one the
	 * driver will put a command into.
	 */
	pring->cmdidx = pring->next_cmdidx;
	writel(pring->cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
}

/**
 * lpfc_sli_update_full_ring - Update the chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * The caller is not required to hold any lock for calling this function.
 * This function updates the chip attention bits for the ring to inform
 * firmware that there is pending work to be done for this ring and requests
 * an interrupt when there is space available in the ring. This function is
 * called when the driver is unable to post more iocbs to the ring due
 * to unavailability of space in the ring.
 **/
static void
lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	pring->flag |= LPFC_CALL_RING_AVAILABLE;

	wmb();

	/*
	 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
	 * The HBA will tell us when an IOCB entry is available.
	 */
	writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
	readl(phba->CAregaddr); /* flush */

	pring->stats.iocb_cmd_full++;
}

/**
 * lpfc_sli_update_ring - Update chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function updates the chip attention register bit for the
 * given ring to inform HBA that there is more work to be done
 * in this ring. The caller is not required to hold any lock.
 **/
static void
lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	/*
	 * Tell the HBA that there is work to do in this ring.
	 */
	if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
		wmb();
		writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
	}
}

/**
 * lpfc_sli_resume_iocb - Process iocbs in the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to post pending iocbs
 * in the txq to the firmware. This function is called when the driver
 * detects space available in the ring.
 **/
static void
lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	IOCB_t *iocb;
	struct lpfc_iocbq *nextiocb;

	/*
	 * Check to see if:
	 *  (a) there is anything on the txq to send
	 *  (b) link is up
	 *  (c) link attention events can be processed (fcp ring only)
	 *  (d) IOCB processing is not blocked by the outstanding mbox command.
	 */
	if (pring->txq_cnt &&
	    lpfc_is_link_up(phba) &&
	    (pring->ringno != phba->sli.fcp_ring ||
	     phba->sli.sli_flag & LPFC_PROCESS_LA)) {

		while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
		       (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
			lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

		if (iocb)
			lpfc_sli_update_ring(phba, pring);
		else
			lpfc_sli_update_full_ring(phba, pring);
	}

	return;
}

/**
 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 *
 * This function is called with hbalock held to get the next
 * available slot for the given HBQ. If there is a free slot
 * available for the HBQ it will return a pointer to the next available
 * HBQ entry else it will return NULL.
 **/
static struct lpfc_hbq_entry *
lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
{
	struct hbq_s *hbqp = &phba->hbqs[hbqno];

	if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
	    ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
		hbqp->next_hbqPutIdx = 0;

	if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
		uint32_t raw_index = phba->hbq_get[hbqno];
		uint32_t getidx = le32_to_cpu(raw_index);

		hbqp->local_hbqGetIdx = getidx;

		if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
			lpfc_printf_log(phba, KERN_ERR,
					LOG_SLI | LOG_VPORT,
					"1802 HBQ %d: local_hbqGetIdx "
					"%u is > than hbqp->entry_count %u\n",
					hbqno, hbqp->local_hbqGetIdx,
					hbqp->entry_count);

			phba->link_state = LPFC_HBA_ERROR;
			return NULL;
		}

		if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
			return NULL;
	}

	return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
			hbqp->hbqPutIdx;
}

/**
 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held to free all the
 * hbq buffers while uninitializing the SLI interface. It also
 * frees the HBQ buffers returned by the firmware but not yet
 * processed by the upper layers.
 **/
void
lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
{
	struct lpfc_dmabuf *dmabuf, *next_dmabuf;
	struct hbq_dmabuf *hbq_buf;
	unsigned long flags;
	int i, hbq_count;
	uint32_t hbqno;

	hbq_count = lpfc_sli_hbq_count();
	/* Return all memory used by all HBQs */
	spin_lock_irqsave(&phba->hbalock, flags);
	for (i = 0; i < hbq_count; ++i) {
		list_for_each_entry_safe(dmabuf, next_dmabuf,
					 &phba->hbqs[i].hbq_buffer_list, list) {
			hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
			list_del(&hbq_buf->dbuf.list);
			(phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
		}
		phba->hbqs[i].buffer_count = 0;
	}
	/* Return all HBQ buffers that are in flight */
	list_for_each_entry_safe(dmabuf, next_dmabuf, &phba->rb_pend_list,
				 list) {
		hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
		list_del(&hbq_buf->dbuf.list);
		if (hbq_buf->tag == -1) {
			(phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
				(phba, hbq_buf);
		} else {
			hbqno = hbq_buf->tag >> 16;
			if (hbqno >= LPFC_MAX_HBQS)
				(phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
					(phba, hbq_buf);
			else
				(phba->hbqs[hbqno].hbq_free_buffer)(phba,
					hbq_buf);
		}
	}

	/* Mark the HBQs not in use */
	phba->hbq_in_use = 0;
	spin_unlock_irqrestore(&phba->hbalock, flags);
}

/**
 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post a
 * hbq buffer to the firmware. If the function finds an empty
 * slot in the HBQ, it will post the buffer. The function will return
 * zero if it successfully posts the buffer else it will return an
 * error.
 **/
static int
lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
			 struct hbq_dmabuf *hbq_buf)
{
	return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
}

/**
 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post a hbq buffer to the
 * firmware. If the function finds an empty slot in the HBQ, it will post the
 * buffer and place it on the hbq_buffer_list. The function will return zero
 * if it successfully posts the buffer else it will return an error.
1709 **/ 1710 static int 1711 lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno, 1712 struct hbq_dmabuf *hbq_buf) 1713 { 1714 struct lpfc_hbq_entry *hbqe; 1715 dma_addr_t physaddr = hbq_buf->dbuf.phys; 1716 1717 /* Get next HBQ entry slot to use */ 1718 hbqe = lpfc_sli_next_hbq_slot(phba, hbqno); 1719 if (hbqe) { 1720 struct hbq_s *hbqp = &phba->hbqs[hbqno]; 1721 1722 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr)); 1723 hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr)); 1724 hbqe->bde.tus.f.bdeSize = hbq_buf->size; 1725 hbqe->bde.tus.f.bdeFlags = 0; 1726 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w); 1727 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag); 1728 /* Sync SLIM */ 1729 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx; 1730 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno); 1731 /* flush */ 1732 readl(phba->hbq_put + hbqno); 1733 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list); 1734 return 0; 1735 } else 1736 return -ENOMEM; 1737 } 1738 1739 /** 1740 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware 1741 * @phba: Pointer to HBA context object. 1742 * @hbqno: HBQ number. 1743 * @hbq_buf: Pointer to HBQ buffer. 1744 * 1745 * This function is called with the hbalock held to post an RQE to the SLI4 1746 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to 1747 * the hbq_buffer_list and return zero, otherwise it will return an error. 1748 **/ 1749 static int 1750 lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno, 1751 struct hbq_dmabuf *hbq_buf) 1752 { 1753 int rc; 1754 struct lpfc_rqe hrqe; 1755 struct lpfc_rqe drqe; 1756 1757 hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys); 1758 hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys); 1759 drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys); 1760 drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys); 1761 rc = lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, 1762 &hrqe, &drqe); 1763 if (rc < 0) 1764 return rc; 1765 hbq_buf->tag = rc; 1766 list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list); 1767 return 0; 1768 } 1769 1770 /* HBQ for ELS and CT traffic. */ 1771 static struct lpfc_hbq_init lpfc_els_hbq = { 1772 .rn = 1, 1773 .entry_count = 256, 1774 .mask_count = 0, 1775 .profile = 0, 1776 .ring_mask = (1 << LPFC_ELS_RING), 1777 .buffer_count = 0, 1778 .init_count = 40, 1779 .add_count = 40, 1780 }; 1781 1782 /* HBQ for the extra ring if needed */ 1783 static struct lpfc_hbq_init lpfc_extra_hbq = { 1784 .rn = 1, 1785 .entry_count = 200, 1786 .mask_count = 0, 1787 .profile = 0, 1788 .ring_mask = (1 << LPFC_EXTRA_RING), 1789 .buffer_count = 0, 1790 .init_count = 0, 1791 .add_count = 5, 1792 }; 1793 1794 /* Array of HBQs */ 1795 struct lpfc_hbq_init *lpfc_hbq_defs[] = { 1796 &lpfc_els_hbq, 1797 &lpfc_extra_hbq, 1798 }; 1799 1800 /** 1801 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ 1802 * @phba: Pointer to HBA context object. 1803 * @hbqno: HBQ number. 1804 * @count: Number of HBQ buffers to be posted. 1805 * 1806 * This function is called with no lock held to post more hbq buffers to the 1807 * given HBQ. The function returns the number of HBQ buffers successfully 1808 * posted. 
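 *
 * Illustrative sketch (not part of this file): the add/init wrappers
 * below simply pass a per-HBQ count taken from lpfc_hbq_defs[], e.g.
 *
 *	posted = lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
 *					   lpfc_hbq_defs[qno]->add_count);
 *
 * where a return of zero means no buffers could be allocated or posted.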
1809 **/ 1810 static int 1811 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count) 1812 { 1813 uint32_t i, posted = 0; 1814 unsigned long flags; 1815 struct hbq_dmabuf *hbq_buffer; 1816 LIST_HEAD(hbq_buf_list); 1817 if (!phba->hbqs[hbqno].hbq_alloc_buffer) 1818 return 0; 1819 1820 if ((phba->hbqs[hbqno].buffer_count + count) > 1821 lpfc_hbq_defs[hbqno]->entry_count) 1822 count = lpfc_hbq_defs[hbqno]->entry_count - 1823 phba->hbqs[hbqno].buffer_count; 1824 if (!count) 1825 return 0; 1826 /* Allocate HBQ entries */ 1827 for (i = 0; i < count; i++) { 1828 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba); 1829 if (!hbq_buffer) 1830 break; 1831 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list); 1832 } 1833 /* Check whether HBQ is still in use */ 1834 spin_lock_irqsave(&phba->hbalock, flags); 1835 if (!phba->hbq_in_use) 1836 goto err; 1837 while (!list_empty(&hbq_buf_list)) { 1838 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf, 1839 dbuf.list); 1840 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count | 1841 (hbqno << 16)); 1842 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) { 1843 phba->hbqs[hbqno].buffer_count++; 1844 posted++; 1845 } else 1846 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 1847 } 1848 spin_unlock_irqrestore(&phba->hbalock, flags); 1849 return posted; 1850 err: 1851 spin_unlock_irqrestore(&phba->hbalock, flags); 1852 while (!list_empty(&hbq_buf_list)) { 1853 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf, 1854 dbuf.list); 1855 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 1856 } 1857 return 0; 1858 } 1859 1860 /** 1861 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware 1862 * @phba: Pointer to HBA context object. 1863 * @qno: HBQ number. 1864 * 1865 * This function posts more buffers to the HBQ. This function 1866 * is called with no lock held. The function returns the number of HBQ entries 1867 * successfully allocated. 1868 **/ 1869 int 1870 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno) 1871 { 1872 if (phba->sli_rev == LPFC_SLI_REV4) 1873 return 0; 1874 else 1875 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 1876 lpfc_hbq_defs[qno]->add_count); 1877 } 1878 1879 /** 1880 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ 1881 * @phba: Pointer to HBA context object. 1882 * @qno: HBQ queue number. 1883 * 1884 * This function is called from SLI initialization code path with 1885 * no lock held to post initial HBQ buffers to firmware. The 1886 * function returns the number of HBQ entries successfully allocated. 1887 **/ 1888 static int 1889 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno) 1890 { 1891 if (phba->sli_rev == LPFC_SLI_REV4) 1892 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 1893 lpfc_hbq_defs[qno]->entry_count); 1894 else 1895 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 1896 lpfc_hbq_defs[qno]->init_count); 1897 } 1898 1899 /** 1900 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list 1901 * @phba: Pointer to HBA context object. 1902 * @hbqno: HBQ number. 1903 * 1904 * This function removes the first hbq buffer on an hbq list and returns a 1905 * pointer to that buffer. If it finds no buffers on the list it returns NULL. 
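 *
 * Illustrative usage (sketch only; note the helper is handed the HBQ's
 * buffer list directly rather than an HBQ number):
 *
 *	hbq_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[hbqno].hbq_buffer_list);
 *	if (!hbq_buf)
 *		return NULL;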
1906 **/ 1907 static struct hbq_dmabuf * 1908 lpfc_sli_hbqbuf_get(struct list_head *rb_list) 1909 { 1910 struct lpfc_dmabuf *d_buf; 1911 1912 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list); 1913 if (!d_buf) 1914 return NULL; 1915 return container_of(d_buf, struct hbq_dmabuf, dbuf); 1916 } 1917 1918 /** 1919 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag 1920 * @phba: Pointer to HBA context object. 1921 * @tag: Tag of the hbq buffer. 1922 * 1923 * This function is called with hbalock held. This function searches 1924 * for the hbq buffer associated with the given tag in the hbq buffer 1925 * list. If it finds the hbq buffer, it returns the hbq_buffer other wise 1926 * it returns NULL. 1927 **/ 1928 static struct hbq_dmabuf * 1929 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag) 1930 { 1931 struct lpfc_dmabuf *d_buf; 1932 struct hbq_dmabuf *hbq_buf; 1933 uint32_t hbqno; 1934 1935 hbqno = tag >> 16; 1936 if (hbqno >= LPFC_MAX_HBQS) 1937 return NULL; 1938 1939 spin_lock_irq(&phba->hbalock); 1940 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) { 1941 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 1942 if (hbq_buf->tag == tag) { 1943 spin_unlock_irq(&phba->hbalock); 1944 return hbq_buf; 1945 } 1946 } 1947 spin_unlock_irq(&phba->hbalock); 1948 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT, 1949 "1803 Bad hbq tag. Data: x%x x%x\n", 1950 tag, phba->hbqs[tag >> 16].buffer_count); 1951 return NULL; 1952 } 1953 1954 /** 1955 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware 1956 * @phba: Pointer to HBA context object. 1957 * @hbq_buffer: Pointer to HBQ buffer. 1958 * 1959 * This function is called with hbalock. This function gives back 1960 * the hbq buffer to firmware. If the HBQ does not have space to 1961 * post the buffer, it will free the buffer. 1962 **/ 1963 void 1964 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer) 1965 { 1966 uint32_t hbqno; 1967 1968 if (hbq_buffer) { 1969 hbqno = hbq_buffer->tag >> 16; 1970 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) 1971 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 1972 } 1973 } 1974 1975 /** 1976 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox 1977 * @mbxCommand: mailbox command code. 1978 * 1979 * This function is called by the mailbox event handler function to verify 1980 * that the completed mailbox command is a legitimate mailbox command. If the 1981 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN 1982 * and the mailbox event handler will take the HBA offline. 
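 *
 * Illustrative check (sketch of how the mailbox completion path below
 * uses this routine):
 *
 *	if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) == MBX_SHUTDOWN) {
 *		phba->link_state = LPFC_HBA_ERROR;
 *		phba->work_hs = HS_FFER3;
 *		lpfc_handle_eratt(phba);
 *	}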
1983 **/ 1984 static int 1985 lpfc_sli_chk_mbx_command(uint8_t mbxCommand) 1986 { 1987 uint8_t ret; 1988 1989 switch (mbxCommand) { 1990 case MBX_LOAD_SM: 1991 case MBX_READ_NV: 1992 case MBX_WRITE_NV: 1993 case MBX_WRITE_VPARMS: 1994 case MBX_RUN_BIU_DIAG: 1995 case MBX_INIT_LINK: 1996 case MBX_DOWN_LINK: 1997 case MBX_CONFIG_LINK: 1998 case MBX_CONFIG_RING: 1999 case MBX_RESET_RING: 2000 case MBX_READ_CONFIG: 2001 case MBX_READ_RCONFIG: 2002 case MBX_READ_SPARM: 2003 case MBX_READ_STATUS: 2004 case MBX_READ_RPI: 2005 case MBX_READ_XRI: 2006 case MBX_READ_REV: 2007 case MBX_READ_LNK_STAT: 2008 case MBX_REG_LOGIN: 2009 case MBX_UNREG_LOGIN: 2010 case MBX_CLEAR_LA: 2011 case MBX_DUMP_MEMORY: 2012 case MBX_DUMP_CONTEXT: 2013 case MBX_RUN_DIAGS: 2014 case MBX_RESTART: 2015 case MBX_UPDATE_CFG: 2016 case MBX_DOWN_LOAD: 2017 case MBX_DEL_LD_ENTRY: 2018 case MBX_RUN_PROGRAM: 2019 case MBX_SET_MASK: 2020 case MBX_SET_VARIABLE: 2021 case MBX_UNREG_D_ID: 2022 case MBX_KILL_BOARD: 2023 case MBX_CONFIG_FARP: 2024 case MBX_BEACON: 2025 case MBX_LOAD_AREA: 2026 case MBX_RUN_BIU_DIAG64: 2027 case MBX_CONFIG_PORT: 2028 case MBX_READ_SPARM64: 2029 case MBX_READ_RPI64: 2030 case MBX_REG_LOGIN64: 2031 case MBX_READ_TOPOLOGY: 2032 case MBX_WRITE_WWN: 2033 case MBX_SET_DEBUG: 2034 case MBX_LOAD_EXP_ROM: 2035 case MBX_ASYNCEVT_ENABLE: 2036 case MBX_REG_VPI: 2037 case MBX_UNREG_VPI: 2038 case MBX_HEARTBEAT: 2039 case MBX_PORT_CAPABILITIES: 2040 case MBX_PORT_IOV_CONTROL: 2041 case MBX_SLI4_CONFIG: 2042 case MBX_SLI4_REQ_FTRS: 2043 case MBX_REG_FCFI: 2044 case MBX_UNREG_FCFI: 2045 case MBX_REG_VFI: 2046 case MBX_UNREG_VFI: 2047 case MBX_INIT_VPI: 2048 case MBX_INIT_VFI: 2049 case MBX_RESUME_RPI: 2050 case MBX_READ_EVENT_LOG_STATUS: 2051 case MBX_READ_EVENT_LOG: 2052 case MBX_SECURITY_MGMT: 2053 case MBX_AUTH_PORT: 2054 ret = mbxCommand; 2055 break; 2056 default: 2057 ret = MBX_SHUTDOWN; 2058 break; 2059 } 2060 return ret; 2061 } 2062 2063 /** 2064 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler 2065 * @phba: Pointer to HBA context object. 2066 * @pmboxq: Pointer to mailbox command. 2067 * 2068 * This is completion handler function for mailbox commands issued from 2069 * lpfc_sli_issue_mbox_wait function. This function is called by the 2070 * mailbox event handler function with no lock held. This function 2071 * will wake up thread waiting on the wait queue pointed by context1 2072 * of the mailbox. 2073 **/ 2074 void 2075 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 2076 { 2077 wait_queue_head_t *pdone_q; 2078 unsigned long drvr_flag; 2079 2080 /* 2081 * If pdone_q is empty, the driver thread gave up waiting and 2082 * continued running. 2083 */ 2084 pmboxq->mbox_flag |= LPFC_MBX_WAKE; 2085 spin_lock_irqsave(&phba->hbalock, drvr_flag); 2086 pdone_q = (wait_queue_head_t *) pmboxq->context1; 2087 if (pdone_q) 2088 wake_up_interruptible(pdone_q); 2089 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 2090 return; 2091 } 2092 2093 2094 /** 2095 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler 2096 * @phba: Pointer to HBA context object. 2097 * @pmb: Pointer to mailbox object. 2098 * 2099 * This function is the default mailbox completion handler. It 2100 * frees the memory resources associated with the completed mailbox 2101 * command. If the completed command is a REG_LOGIN mailbox command, 2102 * this function will issue a UREG_LOGIN to re-claim the RPI. 
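 *
 * Issue-side sketch (illustrative; assumes @pmb came from
 * phba->mbox_mem_pool and has been fully prepared):
 *
 *	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 *	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
 *	if (rc == MBX_NOT_FINISHED)
 *		mempool_free(pmb, phba->mbox_mem_pool);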
2103 **/
2104 void
2105 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2106 {
2107 struct lpfc_vport *vport = pmb->vport;
2108 struct lpfc_dmabuf *mp;
2109 struct lpfc_nodelist *ndlp;
2110 struct Scsi_Host *shost;
2111 uint16_t rpi, vpi;
2112 int rc;
2113
2114 mp = (struct lpfc_dmabuf *) (pmb->context1);
2115
2116 if (mp) {
2117 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2118 kfree(mp);
2119 }
2120
2121 /*
2122 * If a REG_LOGIN succeeded after the node was destroyed or the node
2123 * is in re-discovery, the driver needs to clean up the RPI.
2124 */
2125 if (!(phba->pport->load_flag & FC_UNLOADING) &&
2126 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2127 !pmb->u.mb.mbxStatus) {
2128 rpi = pmb->u.mb.un.varWords[0];
2129 vpi = pmb->u.mb.un.varRegLogin.vpi;
2130 lpfc_unreg_login(phba, vpi, rpi, pmb);
2131 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2132 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2133 if (rc != MBX_NOT_FINISHED)
2134 return;
2135 }
2136
2137 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
2138 !(phba->pport->load_flag & FC_UNLOADING) &&
2139 !pmb->u.mb.mbxStatus) {
2140 shost = lpfc_shost_from_vport(vport);
2141 spin_lock_irq(shost->host_lock);
2142 vport->vpi_state |= LPFC_VPI_REGISTERED;
2143 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2144 spin_unlock_irq(shost->host_lock);
2145 }
2146
2147 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
2148 ndlp = (struct lpfc_nodelist *)pmb->context2;
2149 lpfc_nlp_put(ndlp);
2150 pmb->context2 = NULL;
2151 }
2152
2153 /* Check security permission status on INIT_LINK mailbox command */
2154 if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
2155 (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
2156 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2157 "2860 SLI authentication is required "
2158 "for INIT_LINK but has not been done yet\n");
2159
2160 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
2161 lpfc_sli4_mbox_cmd_free(phba, pmb);
2162 else
2163 mempool_free(pmb, phba->mbox_mem_pool);
2164 }
2165
2166 /**
2167 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
2168 * @phba: Pointer to HBA context object.
2169 *
2170 * This function is called with no lock held. This function processes all
2171 * the completed mailbox commands and gives them to the upper layers. The
2172 * interrupt service routine processes mailbox completion interrupts and adds
2173 * completed mailbox commands to the mboxq_cmpl queue and signals the worker
2174 * thread. The worker thread calls lpfc_sli_handle_mb_event, which returns the
2175 * completed mailbox commands in the mboxq_cmpl queue to the upper layers. This
2176 * function returns the mailbox commands to the upper layer by calling the
2177 * completion handler function of each mailbox.
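 *
 * Control-flow sketch (illustrative): the interrupt handler queues each
 * completed mailbox on phba->sli.mboxq_cmpl and wakes the worker thread
 * via lpfc_worker_wake_up(phba); the worker thread then simply calls
 *
 *	lpfc_sli_handle_mb_event(phba);
 *
 * to run every queued completion handler.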
2178 **/ 2179 int 2180 lpfc_sli_handle_mb_event(struct lpfc_hba *phba) 2181 { 2182 MAILBOX_t *pmbox; 2183 LPFC_MBOXQ_t *pmb; 2184 int rc; 2185 LIST_HEAD(cmplq); 2186 2187 phba->sli.slistat.mbox_event++; 2188 2189 /* Get all completed mailboxe buffers into the cmplq */ 2190 spin_lock_irq(&phba->hbalock); 2191 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq); 2192 spin_unlock_irq(&phba->hbalock); 2193 2194 /* Get a Mailbox buffer to setup mailbox commands for callback */ 2195 do { 2196 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list); 2197 if (pmb == NULL) 2198 break; 2199 2200 pmbox = &pmb->u.mb; 2201 2202 if (pmbox->mbxCommand != MBX_HEARTBEAT) { 2203 if (pmb->vport) { 2204 lpfc_debugfs_disc_trc(pmb->vport, 2205 LPFC_DISC_TRC_MBOX_VPORT, 2206 "MBOX cmpl vport: cmd:x%x mb:x%x x%x", 2207 (uint32_t)pmbox->mbxCommand, 2208 pmbox->un.varWords[0], 2209 pmbox->un.varWords[1]); 2210 } 2211 else { 2212 lpfc_debugfs_disc_trc(phba->pport, 2213 LPFC_DISC_TRC_MBOX, 2214 "MBOX cmpl: cmd:x%x mb:x%x x%x", 2215 (uint32_t)pmbox->mbxCommand, 2216 pmbox->un.varWords[0], 2217 pmbox->un.varWords[1]); 2218 } 2219 } 2220 2221 /* 2222 * It is a fatal error if unknown mbox command completion. 2223 */ 2224 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) == 2225 MBX_SHUTDOWN) { 2226 /* Unknown mailbox command compl */ 2227 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 2228 "(%d):0323 Unknown Mailbox command " 2229 "x%x (x%x/x%x) Cmpl\n", 2230 pmb->vport ? pmb->vport->vpi : 0, 2231 pmbox->mbxCommand, 2232 lpfc_sli_config_mbox_subsys_get(phba, 2233 pmb), 2234 lpfc_sli_config_mbox_opcode_get(phba, 2235 pmb)); 2236 phba->link_state = LPFC_HBA_ERROR; 2237 phba->work_hs = HS_FFER3; 2238 lpfc_handle_eratt(phba); 2239 continue; 2240 } 2241 2242 if (pmbox->mbxStatus) { 2243 phba->sli.slistat.mbox_stat_err++; 2244 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) { 2245 /* Mbox cmd cmpl error - RETRYing */ 2246 lpfc_printf_log(phba, KERN_INFO, 2247 LOG_MBOX | LOG_SLI, 2248 "(%d):0305 Mbox cmd cmpl " 2249 "error - RETRYing Data: x%x " 2250 "(x%x/x%x) x%x x%x x%x\n", 2251 pmb->vport ? pmb->vport->vpi : 0, 2252 pmbox->mbxCommand, 2253 lpfc_sli_config_mbox_subsys_get(phba, 2254 pmb), 2255 lpfc_sli_config_mbox_opcode_get(phba, 2256 pmb), 2257 pmbox->mbxStatus, 2258 pmbox->un.varWords[0], 2259 pmb->vport->port_state); 2260 pmbox->mbxStatus = 0; 2261 pmbox->mbxOwner = OWN_HOST; 2262 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 2263 if (rc != MBX_NOT_FINISHED) 2264 continue; 2265 } 2266 } 2267 2268 /* Mailbox cmd <cmd> Cmpl <cmpl> */ 2269 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 2270 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p " 2271 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n", 2272 pmb->vport ? pmb->vport->vpi : 0, 2273 pmbox->mbxCommand, 2274 lpfc_sli_config_mbox_subsys_get(phba, pmb), 2275 lpfc_sli_config_mbox_opcode_get(phba, pmb), 2276 pmb->mbox_cmpl, 2277 *((uint32_t *) pmbox), 2278 pmbox->un.varWords[0], 2279 pmbox->un.varWords[1], 2280 pmbox->un.varWords[2], 2281 pmbox->un.varWords[3], 2282 pmbox->un.varWords[4], 2283 pmbox->un.varWords[5], 2284 pmbox->un.varWords[6], 2285 pmbox->un.varWords[7]); 2286 2287 if (pmb->mbox_cmpl) 2288 pmb->mbox_cmpl(phba,pmb); 2289 } while (1); 2290 return 0; 2291 } 2292 2293 /** 2294 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag 2295 * @phba: Pointer to HBA context object. 2296 * @pring: Pointer to driver SLI ring object. 2297 * @tag: buffer tag. 2298 * 2299 * This function is called with no lock held. 
When QUE_BUFTAG_BIT bit 2300 * is set in the tag the buffer is posted for a particular exchange, 2301 * the function will return the buffer without replacing the buffer. 2302 * If the buffer is for unsolicited ELS or CT traffic, this function 2303 * returns the buffer and also posts another buffer to the firmware. 2304 **/ 2305 static struct lpfc_dmabuf * 2306 lpfc_sli_get_buff(struct lpfc_hba *phba, 2307 struct lpfc_sli_ring *pring, 2308 uint32_t tag) 2309 { 2310 struct hbq_dmabuf *hbq_entry; 2311 2312 if (tag & QUE_BUFTAG_BIT) 2313 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag); 2314 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag); 2315 if (!hbq_entry) 2316 return NULL; 2317 return &hbq_entry->dbuf; 2318 } 2319 2320 /** 2321 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence 2322 * @phba: Pointer to HBA context object. 2323 * @pring: Pointer to driver SLI ring object. 2324 * @saveq: Pointer to the iocbq struct representing the sequence starting frame. 2325 * @fch_r_ctl: the r_ctl for the first frame of the sequence. 2326 * @fch_type: the type for the first frame of the sequence. 2327 * 2328 * This function is called with no lock held. This function uses the r_ctl and 2329 * type of the received sequence to find the correct callback function to call 2330 * to process the sequence. 2331 **/ 2332 static int 2333 lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2334 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl, 2335 uint32_t fch_type) 2336 { 2337 int i; 2338 2339 /* unSolicited Responses */ 2340 if (pring->prt[0].profile) { 2341 if (pring->prt[0].lpfc_sli_rcv_unsol_event) 2342 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring, 2343 saveq); 2344 return 1; 2345 } 2346 /* We must search, based on rctl / type 2347 for the right routine */ 2348 for (i = 0; i < pring->num_mask; i++) { 2349 if ((pring->prt[i].rctl == fch_r_ctl) && 2350 (pring->prt[i].type == fch_type)) { 2351 if (pring->prt[i].lpfc_sli_rcv_unsol_event) 2352 (pring->prt[i].lpfc_sli_rcv_unsol_event) 2353 (phba, pring, saveq); 2354 return 1; 2355 } 2356 } 2357 return 0; 2358 } 2359 2360 /** 2361 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler 2362 * @phba: Pointer to HBA context object. 2363 * @pring: Pointer to driver SLI ring object. 2364 * @saveq: Pointer to the unsolicited iocb. 2365 * 2366 * This function is called with no lock held by the ring event handler 2367 * when there is an unsolicited iocb posted to the response ring by the 2368 * firmware. This function gets the buffer associated with the iocbs 2369 * and calls the event handler for the ring. This function handles both 2370 * qring buffers and hbq buffers. 2371 * When the function returns 1 the caller can free the iocb object otherwise 2372 * upper layer functions will free the iocb objects. 
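 *
 * Caller-side sketch (illustrative, mirroring the slow-path handler later
 * in this file): the return value decides who frees @saveq:
 *
 *	rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
 *	if (!rc)
 *		free_saveq = 0;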
2373 **/ 2374 static int 2375 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2376 struct lpfc_iocbq *saveq) 2377 { 2378 IOCB_t * irsp; 2379 WORD5 * w5p; 2380 uint32_t Rctl, Type; 2381 uint32_t match; 2382 struct lpfc_iocbq *iocbq; 2383 struct lpfc_dmabuf *dmzbuf; 2384 2385 match = 0; 2386 irsp = &(saveq->iocb); 2387 2388 if (irsp->ulpCommand == CMD_ASYNC_STATUS) { 2389 if (pring->lpfc_sli_rcv_async_status) 2390 pring->lpfc_sli_rcv_async_status(phba, pring, saveq); 2391 else 2392 lpfc_printf_log(phba, 2393 KERN_WARNING, 2394 LOG_SLI, 2395 "0316 Ring %d handler: unexpected " 2396 "ASYNC_STATUS iocb received evt_code " 2397 "0x%x\n", 2398 pring->ringno, 2399 irsp->un.asyncstat.evt_code); 2400 return 1; 2401 } 2402 2403 if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) && 2404 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) { 2405 if (irsp->ulpBdeCount > 0) { 2406 dmzbuf = lpfc_sli_get_buff(phba, pring, 2407 irsp->un.ulpWord[3]); 2408 lpfc_in_buf_free(phba, dmzbuf); 2409 } 2410 2411 if (irsp->ulpBdeCount > 1) { 2412 dmzbuf = lpfc_sli_get_buff(phba, pring, 2413 irsp->unsli3.sli3Words[3]); 2414 lpfc_in_buf_free(phba, dmzbuf); 2415 } 2416 2417 if (irsp->ulpBdeCount > 2) { 2418 dmzbuf = lpfc_sli_get_buff(phba, pring, 2419 irsp->unsli3.sli3Words[7]); 2420 lpfc_in_buf_free(phba, dmzbuf); 2421 } 2422 2423 return 1; 2424 } 2425 2426 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 2427 if (irsp->ulpBdeCount != 0) { 2428 saveq->context2 = lpfc_sli_get_buff(phba, pring, 2429 irsp->un.ulpWord[3]); 2430 if (!saveq->context2) 2431 lpfc_printf_log(phba, 2432 KERN_ERR, 2433 LOG_SLI, 2434 "0341 Ring %d Cannot find buffer for " 2435 "an unsolicited iocb. tag 0x%x\n", 2436 pring->ringno, 2437 irsp->un.ulpWord[3]); 2438 } 2439 if (irsp->ulpBdeCount == 2) { 2440 saveq->context3 = lpfc_sli_get_buff(phba, pring, 2441 irsp->unsli3.sli3Words[7]); 2442 if (!saveq->context3) 2443 lpfc_printf_log(phba, 2444 KERN_ERR, 2445 LOG_SLI, 2446 "0342 Ring %d Cannot find buffer for an" 2447 " unsolicited iocb. tag 0x%x\n", 2448 pring->ringno, 2449 irsp->unsli3.sli3Words[7]); 2450 } 2451 list_for_each_entry(iocbq, &saveq->list, list) { 2452 irsp = &(iocbq->iocb); 2453 if (irsp->ulpBdeCount != 0) { 2454 iocbq->context2 = lpfc_sli_get_buff(phba, pring, 2455 irsp->un.ulpWord[3]); 2456 if (!iocbq->context2) 2457 lpfc_printf_log(phba, 2458 KERN_ERR, 2459 LOG_SLI, 2460 "0343 Ring %d Cannot find " 2461 "buffer for an unsolicited iocb" 2462 ". tag 0x%x\n", pring->ringno, 2463 irsp->un.ulpWord[3]); 2464 } 2465 if (irsp->ulpBdeCount == 2) { 2466 iocbq->context3 = lpfc_sli_get_buff(phba, pring, 2467 irsp->unsli3.sli3Words[7]); 2468 if (!iocbq->context3) 2469 lpfc_printf_log(phba, 2470 KERN_ERR, 2471 LOG_SLI, 2472 "0344 Ring %d Cannot find " 2473 "buffer for an unsolicited " 2474 "iocb. 
tag 0x%x\n", 2475 pring->ringno, 2476 irsp->unsli3.sli3Words[7]); 2477 } 2478 } 2479 } 2480 if (irsp->ulpBdeCount != 0 && 2481 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX || 2482 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) { 2483 int found = 0; 2484 2485 /* search continue save q for same XRI */ 2486 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) { 2487 if (iocbq->iocb.unsli3.rcvsli3.ox_id == 2488 saveq->iocb.unsli3.rcvsli3.ox_id) { 2489 list_add_tail(&saveq->list, &iocbq->list); 2490 found = 1; 2491 break; 2492 } 2493 } 2494 if (!found) 2495 list_add_tail(&saveq->clist, 2496 &pring->iocb_continue_saveq); 2497 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) { 2498 list_del_init(&iocbq->clist); 2499 saveq = iocbq; 2500 irsp = &(saveq->iocb); 2501 } else 2502 return 0; 2503 } 2504 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) || 2505 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) || 2506 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) { 2507 Rctl = FC_RCTL_ELS_REQ; 2508 Type = FC_TYPE_ELS; 2509 } else { 2510 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]); 2511 Rctl = w5p->hcsw.Rctl; 2512 Type = w5p->hcsw.Type; 2513 2514 /* Firmware Workaround */ 2515 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) && 2516 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX || 2517 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { 2518 Rctl = FC_RCTL_ELS_REQ; 2519 Type = FC_TYPE_ELS; 2520 w5p->hcsw.Rctl = Rctl; 2521 w5p->hcsw.Type = Type; 2522 } 2523 } 2524 2525 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type)) 2526 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 2527 "0313 Ring %d handler: unexpected Rctl x%x " 2528 "Type x%x received\n", 2529 pring->ringno, Rctl, Type); 2530 2531 return 1; 2532 } 2533 2534 /** 2535 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb 2536 * @phba: Pointer to HBA context object. 2537 * @pring: Pointer to driver SLI ring object. 2538 * @prspiocb: Pointer to response iocb object. 2539 * 2540 * This function looks up the iocb_lookup table to get the command iocb 2541 * corresponding to the given response iocb using the iotag of the 2542 * response iocb. This function is called with the hbalock held. 2543 * This function returns the command iocb object if it finds the command 2544 * iocb else returns NULL. 2545 **/ 2546 static struct lpfc_iocbq * 2547 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba, 2548 struct lpfc_sli_ring *pring, 2549 struct lpfc_iocbq *prspiocb) 2550 { 2551 struct lpfc_iocbq *cmd_iocb = NULL; 2552 uint16_t iotag; 2553 2554 iotag = prspiocb->iocb.ulpIoTag; 2555 2556 if (iotag != 0 && iotag <= phba->sli.last_iotag) { 2557 cmd_iocb = phba->sli.iocbq_lookup[iotag]; 2558 list_del_init(&cmd_iocb->list); 2559 if (cmd_iocb->iocb_flag & LPFC_IO_ON_Q) { 2560 pring->txcmplq_cnt--; 2561 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_Q; 2562 } 2563 return cmd_iocb; 2564 } 2565 2566 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2567 "0317 iotag x%x is out off " 2568 "range: max iotag x%x wd0 x%x\n", 2569 iotag, phba->sli.last_iotag, 2570 *(((uint32_t *) &prspiocb->iocb) + 7)); 2571 return NULL; 2572 } 2573 2574 /** 2575 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag 2576 * @phba: Pointer to HBA context object. 2577 * @pring: Pointer to driver SLI ring object. 2578 * @iotag: IOCB tag. 2579 * 2580 * This function looks up the iocb_lookup table to get the command iocb 2581 * corresponding to the given iotag. This function is called with the 2582 * hbalock held. 
2583 * This function returns the command iocb object if it finds the command 2584 * iocb else returns NULL. 2585 **/ 2586 static struct lpfc_iocbq * 2587 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba, 2588 struct lpfc_sli_ring *pring, uint16_t iotag) 2589 { 2590 struct lpfc_iocbq *cmd_iocb; 2591 2592 if (iotag != 0 && iotag <= phba->sli.last_iotag) { 2593 cmd_iocb = phba->sli.iocbq_lookup[iotag]; 2594 list_del_init(&cmd_iocb->list); 2595 if (cmd_iocb->iocb_flag & LPFC_IO_ON_Q) { 2596 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_Q; 2597 pring->txcmplq_cnt--; 2598 } 2599 return cmd_iocb; 2600 } 2601 2602 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2603 "0372 iotag x%x is out off range: max iotag (x%x)\n", 2604 iotag, phba->sli.last_iotag); 2605 return NULL; 2606 } 2607 2608 /** 2609 * lpfc_sli_process_sol_iocb - process solicited iocb completion 2610 * @phba: Pointer to HBA context object. 2611 * @pring: Pointer to driver SLI ring object. 2612 * @saveq: Pointer to the response iocb to be processed. 2613 * 2614 * This function is called by the ring event handler for non-fcp 2615 * rings when there is a new response iocb in the response ring. 2616 * The caller is not required to hold any locks. This function 2617 * gets the command iocb associated with the response iocb and 2618 * calls the completion handler for the command iocb. If there 2619 * is no completion handler, the function will free the resources 2620 * associated with command iocb. If the response iocb is for 2621 * an already aborted command iocb, the status of the completion 2622 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED. 2623 * This function always returns 1. 2624 **/ 2625 static int 2626 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2627 struct lpfc_iocbq *saveq) 2628 { 2629 struct lpfc_iocbq *cmdiocbp; 2630 int rc = 1; 2631 unsigned long iflag; 2632 2633 /* Based on the iotag field, get the cmd IOCB from the txcmplq */ 2634 spin_lock_irqsave(&phba->hbalock, iflag); 2635 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq); 2636 spin_unlock_irqrestore(&phba->hbalock, iflag); 2637 2638 if (cmdiocbp) { 2639 if (cmdiocbp->iocb_cmpl) { 2640 /* 2641 * If an ELS command failed send an event to mgmt 2642 * application. 2643 */ 2644 if (saveq->iocb.ulpStatus && 2645 (pring->ringno == LPFC_ELS_RING) && 2646 (cmdiocbp->iocb.ulpCommand == 2647 CMD_ELS_REQUEST64_CR)) 2648 lpfc_send_els_failure_event(phba, 2649 cmdiocbp, saveq); 2650 2651 /* 2652 * Post all ELS completions to the worker thread. 2653 * All other are passed to the completion callback. 2654 */ 2655 if (pring->ringno == LPFC_ELS_RING) { 2656 if ((phba->sli_rev < LPFC_SLI_REV4) && 2657 (cmdiocbp->iocb_flag & 2658 LPFC_DRIVER_ABORTED)) { 2659 spin_lock_irqsave(&phba->hbalock, 2660 iflag); 2661 cmdiocbp->iocb_flag &= 2662 ~LPFC_DRIVER_ABORTED; 2663 spin_unlock_irqrestore(&phba->hbalock, 2664 iflag); 2665 saveq->iocb.ulpStatus = 2666 IOSTAT_LOCAL_REJECT; 2667 saveq->iocb.un.ulpWord[4] = 2668 IOERR_SLI_ABORTED; 2669 2670 /* Firmware could still be in progress 2671 * of DMAing payload, so don't free data 2672 * buffer till after a hbeat. 
2673 */ 2674 spin_lock_irqsave(&phba->hbalock, 2675 iflag); 2676 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE; 2677 spin_unlock_irqrestore(&phba->hbalock, 2678 iflag); 2679 } 2680 if (phba->sli_rev == LPFC_SLI_REV4) { 2681 if (saveq->iocb_flag & 2682 LPFC_EXCHANGE_BUSY) { 2683 /* Set cmdiocb flag for the 2684 * exchange busy so sgl (xri) 2685 * will not be released until 2686 * the abort xri is received 2687 * from hba. 2688 */ 2689 spin_lock_irqsave( 2690 &phba->hbalock, iflag); 2691 cmdiocbp->iocb_flag |= 2692 LPFC_EXCHANGE_BUSY; 2693 spin_unlock_irqrestore( 2694 &phba->hbalock, iflag); 2695 } 2696 if (cmdiocbp->iocb_flag & 2697 LPFC_DRIVER_ABORTED) { 2698 /* 2699 * Clear LPFC_DRIVER_ABORTED 2700 * bit in case it was driver 2701 * initiated abort. 2702 */ 2703 spin_lock_irqsave( 2704 &phba->hbalock, iflag); 2705 cmdiocbp->iocb_flag &= 2706 ~LPFC_DRIVER_ABORTED; 2707 spin_unlock_irqrestore( 2708 &phba->hbalock, iflag); 2709 cmdiocbp->iocb.ulpStatus = 2710 IOSTAT_LOCAL_REJECT; 2711 cmdiocbp->iocb.un.ulpWord[4] = 2712 IOERR_ABORT_REQUESTED; 2713 /* 2714 * For SLI4, irsiocb contains 2715 * NO_XRI in sli_xritag, it 2716 * shall not affect releasing 2717 * sgl (xri) process. 2718 */ 2719 saveq->iocb.ulpStatus = 2720 IOSTAT_LOCAL_REJECT; 2721 saveq->iocb.un.ulpWord[4] = 2722 IOERR_SLI_ABORTED; 2723 spin_lock_irqsave( 2724 &phba->hbalock, iflag); 2725 saveq->iocb_flag |= 2726 LPFC_DELAY_MEM_FREE; 2727 spin_unlock_irqrestore( 2728 &phba->hbalock, iflag); 2729 } 2730 } 2731 } 2732 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq); 2733 } else 2734 lpfc_sli_release_iocbq(phba, cmdiocbp); 2735 } else { 2736 /* 2737 * Unknown initiating command based on the response iotag. 2738 * This could be the case on the ELS ring because of 2739 * lpfc_els_abort(). 2740 */ 2741 if (pring->ringno != LPFC_ELS_RING) { 2742 /* 2743 * Ring <ringno> handler: unexpected completion IoTag 2744 * <IoTag> 2745 */ 2746 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 2747 "0322 Ring %d handler: " 2748 "unexpected completion IoTag x%x " 2749 "Data: x%x x%x x%x x%x\n", 2750 pring->ringno, 2751 saveq->iocb.ulpIoTag, 2752 saveq->iocb.ulpStatus, 2753 saveq->iocb.un.ulpWord[4], 2754 saveq->iocb.ulpCommand, 2755 saveq->iocb.ulpContext); 2756 } 2757 } 2758 2759 return rc; 2760 } 2761 2762 /** 2763 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler 2764 * @phba: Pointer to HBA context object. 2765 * @pring: Pointer to driver SLI ring object. 2766 * 2767 * This function is called from the iocb ring event handlers when 2768 * put pointer is ahead of the get pointer for a ring. This function signal 2769 * an error attention condition to the worker thread and the worker 2770 * thread will transition the HBA to offline state. 
2771 **/ 2772 static void 2773 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 2774 { 2775 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; 2776 /* 2777 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than 2778 * rsp ring <portRspMax> 2779 */ 2780 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2781 "0312 Ring %d handler: portRspPut %d " 2782 "is bigger than rsp ring %d\n", 2783 pring->ringno, le32_to_cpu(pgp->rspPutInx), 2784 pring->numRiocb); 2785 2786 phba->link_state = LPFC_HBA_ERROR; 2787 2788 /* 2789 * All error attention handlers are posted to 2790 * worker thread 2791 */ 2792 phba->work_ha |= HA_ERATT; 2793 phba->work_hs = HS_FFER3; 2794 2795 lpfc_worker_wake_up(phba); 2796 2797 return; 2798 } 2799 2800 /** 2801 * lpfc_poll_eratt - Error attention polling timer timeout handler 2802 * @ptr: Pointer to address of HBA context object. 2803 * 2804 * This function is invoked by the Error Attention polling timer when the 2805 * timer times out. It will check the SLI Error Attention register for 2806 * possible attention events. If so, it will post an Error Attention event 2807 * and wake up worker thread to process it. Otherwise, it will set up the 2808 * Error Attention polling timer for the next poll. 2809 **/ 2810 void lpfc_poll_eratt(unsigned long ptr) 2811 { 2812 struct lpfc_hba *phba; 2813 uint32_t eratt = 0; 2814 2815 phba = (struct lpfc_hba *)ptr; 2816 2817 /* Check chip HA register for error event */ 2818 eratt = lpfc_sli_check_eratt(phba); 2819 2820 if (eratt) 2821 /* Tell the worker thread there is work to do */ 2822 lpfc_worker_wake_up(phba); 2823 else 2824 /* Restart the timer for next eratt poll */ 2825 mod_timer(&phba->eratt_poll, jiffies + 2826 HZ * LPFC_ERATT_POLL_INTERVAL); 2827 return; 2828 } 2829 2830 2831 /** 2832 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring 2833 * @phba: Pointer to HBA context object. 2834 * @pring: Pointer to driver SLI ring object. 2835 * @mask: Host attention register mask for this ring. 2836 * 2837 * This function is called from the interrupt context when there is a ring 2838 * event for the fcp ring. The caller does not hold any lock. 2839 * The function processes each response iocb in the response ring until it 2840 * finds an iocb with LE bit set and chains all the iocbs up to the iocb with 2841 * LE bit set. The function will call the completion handler of the command iocb 2842 * if the response iocb indicates a completion for a command iocb or it is 2843 * an abort completion. The function will call lpfc_sli_process_unsol_iocb 2844 * function if this is an unsolicited iocb. 2845 * This routine presumes LPFC_FCP_RING handling and doesn't bother 2846 * to check it explicitly. 2847 */ 2848 int 2849 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, 2850 struct lpfc_sli_ring *pring, uint32_t mask) 2851 { 2852 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; 2853 IOCB_t *irsp = NULL; 2854 IOCB_t *entry = NULL; 2855 struct lpfc_iocbq *cmdiocbq = NULL; 2856 struct lpfc_iocbq rspiocbq; 2857 uint32_t status; 2858 uint32_t portRspPut, portRspMax; 2859 int rc = 1; 2860 lpfc_iocb_type type; 2861 unsigned long iflag; 2862 uint32_t rsp_cmpl = 0; 2863 2864 spin_lock_irqsave(&phba->hbalock, iflag); 2865 pring->stats.iocb_event++; 2866 2867 /* 2868 * The next available response entry should never exceed the maximum 2869 * entries. If it does, treat it as an adapter hardware error. 
2870 */ 2871 portRspMax = pring->numRiocb; 2872 portRspPut = le32_to_cpu(pgp->rspPutInx); 2873 if (unlikely(portRspPut >= portRspMax)) { 2874 lpfc_sli_rsp_pointers_error(phba, pring); 2875 spin_unlock_irqrestore(&phba->hbalock, iflag); 2876 return 1; 2877 } 2878 if (phba->fcp_ring_in_use) { 2879 spin_unlock_irqrestore(&phba->hbalock, iflag); 2880 return 1; 2881 } else 2882 phba->fcp_ring_in_use = 1; 2883 2884 rmb(); 2885 while (pring->rspidx != portRspPut) { 2886 /* 2887 * Fetch an entry off the ring and copy it into a local data 2888 * structure. The copy involves a byte-swap since the 2889 * network byte order and pci byte orders are different. 2890 */ 2891 entry = lpfc_resp_iocb(phba, pring); 2892 phba->last_completion_time = jiffies; 2893 2894 if (++pring->rspidx >= portRspMax) 2895 pring->rspidx = 0; 2896 2897 lpfc_sli_pcimem_bcopy((uint32_t *) entry, 2898 (uint32_t *) &rspiocbq.iocb, 2899 phba->iocb_rsp_size); 2900 INIT_LIST_HEAD(&(rspiocbq.list)); 2901 irsp = &rspiocbq.iocb; 2902 2903 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK); 2904 pring->stats.iocb_rsp++; 2905 rsp_cmpl++; 2906 2907 if (unlikely(irsp->ulpStatus)) { 2908 /* 2909 * If resource errors reported from HBA, reduce 2910 * queuedepths of the SCSI device. 2911 */ 2912 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 2913 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) { 2914 spin_unlock_irqrestore(&phba->hbalock, iflag); 2915 phba->lpfc_rampdown_queue_depth(phba); 2916 spin_lock_irqsave(&phba->hbalock, iflag); 2917 } 2918 2919 /* Rsp ring <ringno> error: IOCB */ 2920 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 2921 "0336 Rsp Ring %d error: IOCB Data: " 2922 "x%x x%x x%x x%x x%x x%x x%x x%x\n", 2923 pring->ringno, 2924 irsp->un.ulpWord[0], 2925 irsp->un.ulpWord[1], 2926 irsp->un.ulpWord[2], 2927 irsp->un.ulpWord[3], 2928 irsp->un.ulpWord[4], 2929 irsp->un.ulpWord[5], 2930 *(uint32_t *)&irsp->un1, 2931 *((uint32_t *)&irsp->un1 + 1)); 2932 } 2933 2934 switch (type) { 2935 case LPFC_ABORT_IOCB: 2936 case LPFC_SOL_IOCB: 2937 /* 2938 * Idle exchange closed via ABTS from port. No iocb 2939 * resources need to be recovered. 2940 */ 2941 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) { 2942 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 2943 "0333 IOCB cmd 0x%x" 2944 " processed. 
Skipping" 2945 " completion\n", 2946 irsp->ulpCommand); 2947 break; 2948 } 2949 2950 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, 2951 &rspiocbq); 2952 if (unlikely(!cmdiocbq)) 2953 break; 2954 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) 2955 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED; 2956 if (cmdiocbq->iocb_cmpl) { 2957 spin_unlock_irqrestore(&phba->hbalock, iflag); 2958 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, 2959 &rspiocbq); 2960 spin_lock_irqsave(&phba->hbalock, iflag); 2961 } 2962 break; 2963 case LPFC_UNSOL_IOCB: 2964 spin_unlock_irqrestore(&phba->hbalock, iflag); 2965 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq); 2966 spin_lock_irqsave(&phba->hbalock, iflag); 2967 break; 2968 default: 2969 if (irsp->ulpCommand == CMD_ADAPTER_MSG) { 2970 char adaptermsg[LPFC_MAX_ADPTMSG]; 2971 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); 2972 memcpy(&adaptermsg[0], (uint8_t *) irsp, 2973 MAX_MSG_DATA); 2974 dev_warn(&((phba->pcidev)->dev), 2975 "lpfc%d: %s\n", 2976 phba->brd_no, adaptermsg); 2977 } else { 2978 /* Unknown IOCB command */ 2979 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2980 "0334 Unknown IOCB command " 2981 "Data: x%x, x%x x%x x%x x%x\n", 2982 type, irsp->ulpCommand, 2983 irsp->ulpStatus, 2984 irsp->ulpIoTag, 2985 irsp->ulpContext); 2986 } 2987 break; 2988 } 2989 2990 /* 2991 * The response IOCB has been processed. Update the ring 2992 * pointer in SLIM. If the port response put pointer has not 2993 * been updated, sync the pgp->rspPutInx and fetch the new port 2994 * response put pointer. 2995 */ 2996 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx); 2997 2998 if (pring->rspidx == portRspPut) 2999 portRspPut = le32_to_cpu(pgp->rspPutInx); 3000 } 3001 3002 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) { 3003 pring->stats.iocb_rsp_full++; 3004 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4)); 3005 writel(status, phba->CAregaddr); 3006 readl(phba->CAregaddr); 3007 } 3008 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) { 3009 pring->flag &= ~LPFC_CALL_RING_AVAILABLE; 3010 pring->stats.iocb_cmd_empty++; 3011 3012 /* Force update of the local copy of cmdGetInx */ 3013 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx); 3014 lpfc_sli_resume_iocb(phba, pring); 3015 3016 if ((pring->lpfc_sli_cmd_available)) 3017 (pring->lpfc_sli_cmd_available) (phba, pring); 3018 3019 } 3020 3021 phba->fcp_ring_in_use = 0; 3022 spin_unlock_irqrestore(&phba->hbalock, iflag); 3023 return rc; 3024 } 3025 3026 /** 3027 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb 3028 * @phba: Pointer to HBA context object. 3029 * @pring: Pointer to driver SLI ring object. 3030 * @rspiocbp: Pointer to driver response IOCB object. 3031 * 3032 * This function is called from the worker thread when there is a slow-path 3033 * response IOCB to process. This function chains all the response iocbs until 3034 * seeing the iocb with the LE bit set. The function will call 3035 * lpfc_sli_process_sol_iocb function if the response iocb indicates a 3036 * completion of a command iocb. The function will call the 3037 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb. 3038 * The function frees the resources or calls the completion handler if this 3039 * iocb is an abort completion. The function returns NULL when the response 3040 * iocb has the LE bit set and all the chained iocbs are processed, otherwise 3041 * this function shall chain the iocb on to the iocb_continueq and return the 3042 * response iocb passed in. 
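 *
 * Caller sketch (illustrative): the slow-ring event handlers feed each
 * response iocb in and only hold on to it while a chain is being built:
 *
 *	rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
 *
 * A NULL return means an LE-terminated chain was completed and released;
 * a non-NULL return is the same iocb, now parked on pring->iocb_continueq.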
3043 **/ 3044 static struct lpfc_iocbq * 3045 lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 3046 struct lpfc_iocbq *rspiocbp) 3047 { 3048 struct lpfc_iocbq *saveq; 3049 struct lpfc_iocbq *cmdiocbp; 3050 struct lpfc_iocbq *next_iocb; 3051 IOCB_t *irsp = NULL; 3052 uint32_t free_saveq; 3053 uint8_t iocb_cmd_type; 3054 lpfc_iocb_type type; 3055 unsigned long iflag; 3056 int rc; 3057 3058 spin_lock_irqsave(&phba->hbalock, iflag); 3059 /* First add the response iocb to the countinueq list */ 3060 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq)); 3061 pring->iocb_continueq_cnt++; 3062 3063 /* Now, determine whether the list is completed for processing */ 3064 irsp = &rspiocbp->iocb; 3065 if (irsp->ulpLe) { 3066 /* 3067 * By default, the driver expects to free all resources 3068 * associated with this iocb completion. 3069 */ 3070 free_saveq = 1; 3071 saveq = list_get_first(&pring->iocb_continueq, 3072 struct lpfc_iocbq, list); 3073 irsp = &(saveq->iocb); 3074 list_del_init(&pring->iocb_continueq); 3075 pring->iocb_continueq_cnt = 0; 3076 3077 pring->stats.iocb_rsp++; 3078 3079 /* 3080 * If resource errors reported from HBA, reduce 3081 * queuedepths of the SCSI device. 3082 */ 3083 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 3084 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) { 3085 spin_unlock_irqrestore(&phba->hbalock, iflag); 3086 phba->lpfc_rampdown_queue_depth(phba); 3087 spin_lock_irqsave(&phba->hbalock, iflag); 3088 } 3089 3090 if (irsp->ulpStatus) { 3091 /* Rsp ring <ringno> error: IOCB */ 3092 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 3093 "0328 Rsp Ring %d error: " 3094 "IOCB Data: " 3095 "x%x x%x x%x x%x " 3096 "x%x x%x x%x x%x " 3097 "x%x x%x x%x x%x " 3098 "x%x x%x x%x x%x\n", 3099 pring->ringno, 3100 irsp->un.ulpWord[0], 3101 irsp->un.ulpWord[1], 3102 irsp->un.ulpWord[2], 3103 irsp->un.ulpWord[3], 3104 irsp->un.ulpWord[4], 3105 irsp->un.ulpWord[5], 3106 *(((uint32_t *) irsp) + 6), 3107 *(((uint32_t *) irsp) + 7), 3108 *(((uint32_t *) irsp) + 8), 3109 *(((uint32_t *) irsp) + 9), 3110 *(((uint32_t *) irsp) + 10), 3111 *(((uint32_t *) irsp) + 11), 3112 *(((uint32_t *) irsp) + 12), 3113 *(((uint32_t *) irsp) + 13), 3114 *(((uint32_t *) irsp) + 14), 3115 *(((uint32_t *) irsp) + 15)); 3116 } 3117 3118 /* 3119 * Fetch the IOCB command type and call the correct completion 3120 * routine. Solicited and Unsolicited IOCBs on the ELS ring 3121 * get freed back to the lpfc_iocb_list by the discovery 3122 * kernel thread. 
3123 */ 3124 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK; 3125 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type); 3126 switch (type) { 3127 case LPFC_SOL_IOCB: 3128 spin_unlock_irqrestore(&phba->hbalock, iflag); 3129 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq); 3130 spin_lock_irqsave(&phba->hbalock, iflag); 3131 break; 3132 3133 case LPFC_UNSOL_IOCB: 3134 spin_unlock_irqrestore(&phba->hbalock, iflag); 3135 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq); 3136 spin_lock_irqsave(&phba->hbalock, iflag); 3137 if (!rc) 3138 free_saveq = 0; 3139 break; 3140 3141 case LPFC_ABORT_IOCB: 3142 cmdiocbp = NULL; 3143 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX) 3144 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, 3145 saveq); 3146 if (cmdiocbp) { 3147 /* Call the specified completion routine */ 3148 if (cmdiocbp->iocb_cmpl) { 3149 spin_unlock_irqrestore(&phba->hbalock, 3150 iflag); 3151 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp, 3152 saveq); 3153 spin_lock_irqsave(&phba->hbalock, 3154 iflag); 3155 } else 3156 __lpfc_sli_release_iocbq(phba, 3157 cmdiocbp); 3158 } 3159 break; 3160 3161 case LPFC_UNKNOWN_IOCB: 3162 if (irsp->ulpCommand == CMD_ADAPTER_MSG) { 3163 char adaptermsg[LPFC_MAX_ADPTMSG]; 3164 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); 3165 memcpy(&adaptermsg[0], (uint8_t *)irsp, 3166 MAX_MSG_DATA); 3167 dev_warn(&((phba->pcidev)->dev), 3168 "lpfc%d: %s\n", 3169 phba->brd_no, adaptermsg); 3170 } else { 3171 /* Unknown IOCB command */ 3172 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3173 "0335 Unknown IOCB " 3174 "command Data: x%x " 3175 "x%x x%x x%x\n", 3176 irsp->ulpCommand, 3177 irsp->ulpStatus, 3178 irsp->ulpIoTag, 3179 irsp->ulpContext); 3180 } 3181 break; 3182 } 3183 3184 if (free_saveq) { 3185 list_for_each_entry_safe(rspiocbp, next_iocb, 3186 &saveq->list, list) { 3187 list_del(&rspiocbp->list); 3188 __lpfc_sli_release_iocbq(phba, rspiocbp); 3189 } 3190 __lpfc_sli_release_iocbq(phba, saveq); 3191 } 3192 rspiocbp = NULL; 3193 } 3194 spin_unlock_irqrestore(&phba->hbalock, iflag); 3195 return rspiocbp; 3196 } 3197 3198 /** 3199 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs 3200 * @phba: Pointer to HBA context object. 3201 * @pring: Pointer to driver SLI ring object. 3202 * @mask: Host attention register mask for this ring. 3203 * 3204 * This routine wraps the actual slow_ring event process routine from the 3205 * API jump table function pointer from the lpfc_hba struct. 3206 **/ 3207 void 3208 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba, 3209 struct lpfc_sli_ring *pring, uint32_t mask) 3210 { 3211 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask); 3212 } 3213 3214 /** 3215 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings 3216 * @phba: Pointer to HBA context object. 3217 * @pring: Pointer to driver SLI ring object. 3218 * @mask: Host attention register mask for this ring. 3219 * 3220 * This function is called from the worker thread when there is a ring event 3221 * for non-fcp rings. The caller does not hold any lock. The function will 3222 * remove each response iocb in the response ring and calls the handle 3223 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it. 
3224 **/ 3225 static void 3226 lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba, 3227 struct lpfc_sli_ring *pring, uint32_t mask) 3228 { 3229 struct lpfc_pgp *pgp; 3230 IOCB_t *entry; 3231 IOCB_t *irsp = NULL; 3232 struct lpfc_iocbq *rspiocbp = NULL; 3233 uint32_t portRspPut, portRspMax; 3234 unsigned long iflag; 3235 uint32_t status; 3236 3237 pgp = &phba->port_gp[pring->ringno]; 3238 spin_lock_irqsave(&phba->hbalock, iflag); 3239 pring->stats.iocb_event++; 3240 3241 /* 3242 * The next available response entry should never exceed the maximum 3243 * entries. If it does, treat it as an adapter hardware error. 3244 */ 3245 portRspMax = pring->numRiocb; 3246 portRspPut = le32_to_cpu(pgp->rspPutInx); 3247 if (portRspPut >= portRspMax) { 3248 /* 3249 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than 3250 * rsp ring <portRspMax> 3251 */ 3252 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3253 "0303 Ring %d handler: portRspPut %d " 3254 "is bigger than rsp ring %d\n", 3255 pring->ringno, portRspPut, portRspMax); 3256 3257 phba->link_state = LPFC_HBA_ERROR; 3258 spin_unlock_irqrestore(&phba->hbalock, iflag); 3259 3260 phba->work_hs = HS_FFER3; 3261 lpfc_handle_eratt(phba); 3262 3263 return; 3264 } 3265 3266 rmb(); 3267 while (pring->rspidx != portRspPut) { 3268 /* 3269 * Build a completion list and call the appropriate handler. 3270 * The process is to get the next available response iocb, get 3271 * a free iocb from the list, copy the response data into the 3272 * free iocb, insert to the continuation list, and update the 3273 * next response index to slim. This process makes response 3274 * iocb's in the ring available to DMA as fast as possible but 3275 * pays a penalty for a copy operation. Since the iocb is 3276 * only 32 bytes, this penalty is considered small relative to 3277 * the PCI reads for register values and a slim write. When 3278 * the ulpLe field is set, the entire Command has been 3279 * received. 3280 */ 3281 entry = lpfc_resp_iocb(phba, pring); 3282 3283 phba->last_completion_time = jiffies; 3284 rspiocbp = __lpfc_sli_get_iocbq(phba); 3285 if (rspiocbp == NULL) { 3286 printk(KERN_ERR "%s: out of buffers! Failing " 3287 "completion.\n", __func__); 3288 break; 3289 } 3290 3291 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb, 3292 phba->iocb_rsp_size); 3293 irsp = &rspiocbp->iocb; 3294 3295 if (++pring->rspidx >= portRspMax) 3296 pring->rspidx = 0; 3297 3298 if (pring->ringno == LPFC_ELS_RING) { 3299 lpfc_debugfs_slow_ring_trc(phba, 3300 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x", 3301 *(((uint32_t *) irsp) + 4), 3302 *(((uint32_t *) irsp) + 6), 3303 *(((uint32_t *) irsp) + 7)); 3304 } 3305 3306 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx); 3307 3308 spin_unlock_irqrestore(&phba->hbalock, iflag); 3309 /* Handle the response IOCB */ 3310 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp); 3311 spin_lock_irqsave(&phba->hbalock, iflag); 3312 3313 /* 3314 * If the port response put pointer has not been updated, sync 3315 * the pgp->rspPutInx in the MAILBOX_tand fetch the new port 3316 * response put pointer. 
3317 */ 3318 if (pring->rspidx == portRspPut) { 3319 portRspPut = le32_to_cpu(pgp->rspPutInx); 3320 } 3321 } /* while (pring->rspidx != portRspPut) */ 3322 3323 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) { 3324 /* At least one response entry has been freed */ 3325 pring->stats.iocb_rsp_full++; 3326 /* SET RxRE_RSP in Chip Att register */ 3327 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4)); 3328 writel(status, phba->CAregaddr); 3329 readl(phba->CAregaddr); /* flush */ 3330 } 3331 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) { 3332 pring->flag &= ~LPFC_CALL_RING_AVAILABLE; 3333 pring->stats.iocb_cmd_empty++; 3334 3335 /* Force update of the local copy of cmdGetInx */ 3336 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx); 3337 lpfc_sli_resume_iocb(phba, pring); 3338 3339 if ((pring->lpfc_sli_cmd_available)) 3340 (pring->lpfc_sli_cmd_available) (phba, pring); 3341 3342 } 3343 3344 spin_unlock_irqrestore(&phba->hbalock, iflag); 3345 return; 3346 } 3347 3348 /** 3349 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events 3350 * @phba: Pointer to HBA context object. 3351 * @pring: Pointer to driver SLI ring object. 3352 * @mask: Host attention register mask for this ring. 3353 * 3354 * This function is called from the worker thread when there is a pending 3355 * ELS response iocb on the driver internal slow-path response iocb worker 3356 * queue. The caller does not hold any lock. The function will remove each 3357 * response iocb from the response worker queue and calls the handle 3358 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it. 3359 **/ 3360 static void 3361 lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba, 3362 struct lpfc_sli_ring *pring, uint32_t mask) 3363 { 3364 struct lpfc_iocbq *irspiocbq; 3365 struct hbq_dmabuf *dmabuf; 3366 struct lpfc_cq_event *cq_event; 3367 unsigned long iflag; 3368 3369 spin_lock_irqsave(&phba->hbalock, iflag); 3370 phba->hba_flag &= ~HBA_SP_QUEUE_EVT; 3371 spin_unlock_irqrestore(&phba->hbalock, iflag); 3372 while (!list_empty(&phba->sli4_hba.sp_queue_event)) { 3373 /* Get the response iocb from the head of work queue */ 3374 spin_lock_irqsave(&phba->hbalock, iflag); 3375 list_remove_head(&phba->sli4_hba.sp_queue_event, 3376 cq_event, struct lpfc_cq_event, list); 3377 spin_unlock_irqrestore(&phba->hbalock, iflag); 3378 3379 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) { 3380 case CQE_CODE_COMPL_WQE: 3381 irspiocbq = container_of(cq_event, struct lpfc_iocbq, 3382 cq_event); 3383 /* Translate ELS WCQE to response IOCBQ */ 3384 irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba, 3385 irspiocbq); 3386 if (irspiocbq) 3387 lpfc_sli_sp_handle_rspiocb(phba, pring, 3388 irspiocbq); 3389 break; 3390 case CQE_CODE_RECEIVE: 3391 case CQE_CODE_RECEIVE_V1: 3392 dmabuf = container_of(cq_event, struct hbq_dmabuf, 3393 cq_event); 3394 lpfc_sli4_handle_received_buffer(phba, dmabuf); 3395 break; 3396 default: 3397 break; 3398 } 3399 } 3400 } 3401 3402 /** 3403 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring 3404 * @phba: Pointer to HBA context object. 3405 * @pring: Pointer to driver SLI ring object. 3406 * 3407 * This function aborts all iocbs in the given ring and frees all the iocb 3408 * objects in txq. This function issues an abort iocb for all the iocb commands 3409 * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before 3410 * the return of this function. The caller is not required to hold any locks. 
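 *
 * Illustrative caller sketch (not taken from this file; assumes @phba is
 * valid and psli is &phba->sli): error recovery typically aborts the FCP
 * ring like this:
 *
 *	pring = &psli->ring[psli->fcp_ring];
 *	lpfc_sli_abort_iocb_ring(phba, pring);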
3411 **/ 3412 void 3413 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 3414 { 3415 LIST_HEAD(completions); 3416 struct lpfc_iocbq *iocb, *next_iocb; 3417 3418 if (pring->ringno == LPFC_ELS_RING) { 3419 lpfc_fabric_abort_hba(phba); 3420 } 3421 3422 /* Error everything on txq and txcmplq 3423 * First do the txq. 3424 */ 3425 spin_lock_irq(&phba->hbalock); 3426 list_splice_init(&pring->txq, &completions); 3427 pring->txq_cnt = 0; 3428 3429 /* Next issue ABTS for everything on the txcmplq */ 3430 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) 3431 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 3432 3433 spin_unlock_irq(&phba->hbalock); 3434 3435 /* Cancel all the IOCBs from the completions list */ 3436 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 3437 IOERR_SLI_ABORTED); 3438 } 3439 3440 /** 3441 * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring 3442 * @phba: Pointer to HBA context object. 3443 * 3444 * This function flushes all iocbs in the fcp ring and frees all the iocb 3445 * objects in txq and txcmplq. This function will not issue abort iocbs 3446 * for all the iocb commands in txcmplq, they will just be returned with 3447 * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI 3448 * slot has been permanently disabled. 3449 **/ 3450 void 3451 lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba) 3452 { 3453 LIST_HEAD(txq); 3454 LIST_HEAD(txcmplq); 3455 struct lpfc_sli *psli = &phba->sli; 3456 struct lpfc_sli_ring *pring; 3457 3458 /* Currently, only one fcp ring */ 3459 pring = &psli->ring[psli->fcp_ring]; 3460 3461 spin_lock_irq(&phba->hbalock); 3462 /* Retrieve everything on txq */ 3463 list_splice_init(&pring->txq, &txq); 3464 pring->txq_cnt = 0; 3465 3466 /* Retrieve everything on the txcmplq */ 3467 list_splice_init(&pring->txcmplq, &txcmplq); 3468 pring->txcmplq_cnt = 0; 3469 spin_unlock_irq(&phba->hbalock); 3470 3471 /* Flush the txq */ 3472 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT, 3473 IOERR_SLI_DOWN); 3474 3475 /* Flush the txcmpq */ 3476 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT, 3477 IOERR_SLI_DOWN); 3478 } 3479 3480 /** 3481 * lpfc_sli_brdready_s3 - Check for sli3 host ready status 3482 * @phba: Pointer to HBA context object. 3483 * @mask: Bit mask to be checked. 3484 * 3485 * This function reads the host status register and compares 3486 * with the provided bit mask to check if HBA completed 3487 * the restart. This function will wait in a loop for the 3488 * HBA to complete restart. If the HBA does not restart within 3489 * 15 iterations, the function will reset the HBA again. The 3490 * function returns 1 when HBA fail to restart otherwise returns 3491 * zero. 3492 **/ 3493 static int 3494 lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask) 3495 { 3496 uint32_t status; 3497 int i = 0; 3498 int retval = 0; 3499 3500 /* Read the HBA Host Status Register */ 3501 if (lpfc_readl(phba->HSregaddr, &status)) 3502 return 1; 3503 3504 /* 3505 * Check status register every 100ms for 5 retries, then every 3506 * 500ms for 5, then every 2.5 sec for 5, then reset board and 3507 * every 2.5 sec for 4. 3508 * Break our of the loop if errors occurred during init. 
3509 */ 3510 while (((status & mask) != mask) && 3511 !(status & HS_FFERM) && 3512 i++ < 20) { 3513 3514 if (i <= 5) 3515 msleep(10); 3516 else if (i <= 10) 3517 msleep(500); 3518 else 3519 msleep(2500); 3520 3521 if (i == 15) { 3522 /* Do post */ 3523 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 3524 lpfc_sli_brdrestart(phba); 3525 } 3526 /* Read the HBA Host Status Register */ 3527 if (lpfc_readl(phba->HSregaddr, &status)) { 3528 retval = 1; 3529 break; 3530 } 3531 } 3532 3533 /* Check to see if any errors occurred during init */ 3534 if ((status & HS_FFERM) || (i >= 20)) { 3535 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3536 "2751 Adapter failed to restart, " 3537 "status reg x%x, FW Data: A8 x%x AC x%x\n", 3538 status, 3539 readl(phba->MBslimaddr + 0xa8), 3540 readl(phba->MBslimaddr + 0xac)); 3541 phba->link_state = LPFC_HBA_ERROR; 3542 retval = 1; 3543 } 3544 3545 return retval; 3546 } 3547 3548 /** 3549 * lpfc_sli_brdready_s4 - Check for sli4 host ready status 3550 * @phba: Pointer to HBA context object. 3551 * @mask: Bit mask to be checked. 3552 * 3553 * This function checks the host status register to check if HBA is 3554 * ready. This function will wait in a loop for the HBA to be ready 3555 * If the HBA is not ready , the function will will reset the HBA PCI 3556 * function again. The function returns 1 when HBA fail to be ready 3557 * otherwise returns zero. 3558 **/ 3559 static int 3560 lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask) 3561 { 3562 uint32_t status; 3563 int retval = 0; 3564 3565 /* Read the HBA Host Status Register */ 3566 status = lpfc_sli4_post_status_check(phba); 3567 3568 if (status) { 3569 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 3570 lpfc_sli_brdrestart(phba); 3571 status = lpfc_sli4_post_status_check(phba); 3572 } 3573 3574 /* Check to see if any errors occurred during init */ 3575 if (status) { 3576 phba->link_state = LPFC_HBA_ERROR; 3577 retval = 1; 3578 } else 3579 phba->sli4_hba.intr_enable = 0; 3580 3581 return retval; 3582 } 3583 3584 /** 3585 * lpfc_sli_brdready - Wrapper func for checking the hba readyness 3586 * @phba: Pointer to HBA context object. 3587 * @mask: Bit mask to be checked. 3588 * 3589 * This routine wraps the actual SLI3 or SLI4 hba readyness check routine 3590 * from the API jump table function pointer from the lpfc_hba struct. 3591 **/ 3592 int 3593 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask) 3594 { 3595 return phba->lpfc_sli_brdready(phba, mask); 3596 } 3597 3598 #define BARRIER_TEST_PATTERN (0xdeadbeef) 3599 3600 /** 3601 * lpfc_reset_barrier - Make HBA ready for HBA reset 3602 * @phba: Pointer to HBA context object. 3603 * 3604 * This function is called before resetting an HBA. This function is called 3605 * with hbalock held and requests HBA to quiesce DMAs before a reset. 3606 **/ 3607 void lpfc_reset_barrier(struct lpfc_hba *phba) 3608 { 3609 uint32_t __iomem *resp_buf; 3610 uint32_t __iomem *mbox_buf; 3611 volatile uint32_t mbox; 3612 uint32_t hc_copy, ha_copy, resp_data; 3613 int i; 3614 uint8_t hdrtype; 3615 3616 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype); 3617 if (hdrtype != 0x80 || 3618 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID && 3619 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID)) 3620 return; 3621 3622 /* 3623 * Tell the other part of the chip to suspend temporarily all 3624 * its DMA activity. 
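 * The sequence below does this by posting a chip-owned MBX_KILL_BOARD word to
 * SLIM, writing BARRIER_TEST_PATTERN into the next response word and waiting
 * for the chip to overwrite it with the pattern's complement, then waiting
 * for ownership to return to the host and for the resulting error attention,
 * which is acknowledged before the original HC register value is restored.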
3625 */ 3626 resp_buf = phba->MBslimaddr; 3627 3628 /* Disable the error attention */ 3629 if (lpfc_readl(phba->HCregaddr, &hc_copy)) 3630 return; 3631 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr); 3632 readl(phba->HCregaddr); /* flush */ 3633 phba->link_flag |= LS_IGNORE_ERATT; 3634 3635 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 3636 return; 3637 if (ha_copy & HA_ERATT) { 3638 /* Clear Chip error bit */ 3639 writel(HA_ERATT, phba->HAregaddr); 3640 phba->pport->stopped = 1; 3641 } 3642 3643 mbox = 0; 3644 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD; 3645 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP; 3646 3647 writel(BARRIER_TEST_PATTERN, (resp_buf + 1)); 3648 mbox_buf = phba->MBslimaddr; 3649 writel(mbox, mbox_buf); 3650 3651 for (i = 0; i < 50; i++) { 3652 if (lpfc_readl((resp_buf + 1), &resp_data)) 3653 return; 3654 if (resp_data != ~(BARRIER_TEST_PATTERN)) 3655 mdelay(1); 3656 else 3657 break; 3658 } 3659 resp_data = 0; 3660 if (lpfc_readl((resp_buf + 1), &resp_data)) 3661 return; 3662 if (resp_data != ~(BARRIER_TEST_PATTERN)) { 3663 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE || 3664 phba->pport->stopped) 3665 goto restore_hc; 3666 else 3667 goto clear_errat; 3668 } 3669 3670 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST; 3671 resp_data = 0; 3672 for (i = 0; i < 500; i++) { 3673 if (lpfc_readl(resp_buf, &resp_data)) 3674 return; 3675 if (resp_data != mbox) 3676 mdelay(1); 3677 else 3678 break; 3679 } 3680 3681 clear_errat: 3682 3683 while (++i < 500) { 3684 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 3685 return; 3686 if (!(ha_copy & HA_ERATT)) 3687 mdelay(1); 3688 else 3689 break; 3690 } 3691 3692 if (readl(phba->HAregaddr) & HA_ERATT) { 3693 writel(HA_ERATT, phba->HAregaddr); 3694 phba->pport->stopped = 1; 3695 } 3696 3697 restore_hc: 3698 phba->link_flag &= ~LS_IGNORE_ERATT; 3699 writel(hc_copy, phba->HCregaddr); 3700 readl(phba->HCregaddr); /* flush */ 3701 } 3702 3703 /** 3704 * lpfc_sli_brdkill - Issue a kill_board mailbox command 3705 * @phba: Pointer to HBA context object. 3706 * 3707 * This function issues a kill_board mailbox command and waits for 3708 * the error attention interrupt. This function is called for stopping 3709 * the firmware processing. The caller is not required to hold any 3710 * locks. This function calls lpfc_hba_down_post function to free 3711 * any pending commands after the kill. The function will return 1 when it 3712 * fails to kill the board else will return 0. 
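 *
 * Minimal usage sketch (caller context and the log message number are
 * assumed for illustration only):
 *
 *	if (lpfc_sli_brdkill(phba))
 *		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 *				"xxxx Kill board did not complete\n");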
3713 **/ 3714 int 3715 lpfc_sli_brdkill(struct lpfc_hba *phba) 3716 { 3717 struct lpfc_sli *psli; 3718 LPFC_MBOXQ_t *pmb; 3719 uint32_t status; 3720 uint32_t ha_copy; 3721 int retval; 3722 int i = 0; 3723 3724 psli = &phba->sli; 3725 3726 /* Kill HBA */ 3727 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3728 "0329 Kill HBA Data: x%x x%x\n", 3729 phba->pport->port_state, psli->sli_flag); 3730 3731 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3732 if (!pmb) 3733 return 1; 3734 3735 /* Disable the error attention */ 3736 spin_lock_irq(&phba->hbalock); 3737 if (lpfc_readl(phba->HCregaddr, &status)) { 3738 spin_unlock_irq(&phba->hbalock); 3739 mempool_free(pmb, phba->mbox_mem_pool); 3740 return 1; 3741 } 3742 status &= ~HC_ERINT_ENA; 3743 writel(status, phba->HCregaddr); 3744 readl(phba->HCregaddr); /* flush */ 3745 phba->link_flag |= LS_IGNORE_ERATT; 3746 spin_unlock_irq(&phba->hbalock); 3747 3748 lpfc_kill_board(phba, pmb); 3749 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 3750 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 3751 3752 if (retval != MBX_SUCCESS) { 3753 if (retval != MBX_BUSY) 3754 mempool_free(pmb, phba->mbox_mem_pool); 3755 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3756 "2752 KILL_BOARD command failed retval %d\n", 3757 retval); 3758 spin_lock_irq(&phba->hbalock); 3759 phba->link_flag &= ~LS_IGNORE_ERATT; 3760 spin_unlock_irq(&phba->hbalock); 3761 return 1; 3762 } 3763 3764 spin_lock_irq(&phba->hbalock); 3765 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 3766 spin_unlock_irq(&phba->hbalock); 3767 3768 mempool_free(pmb, phba->mbox_mem_pool); 3769 3770 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error 3771 * attention every 100ms for 3 seconds. If we don't get ERATT after 3772 * 3 seconds we still set HBA_ERROR state because the status of the 3773 * board is now undefined. 3774 */ 3775 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 3776 return 1; 3777 while ((i++ < 30) && !(ha_copy & HA_ERATT)) { 3778 mdelay(100); 3779 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 3780 return 1; 3781 } 3782 3783 del_timer_sync(&psli->mbox_tmo); 3784 if (ha_copy & HA_ERATT) { 3785 writel(HA_ERATT, phba->HAregaddr); 3786 phba->pport->stopped = 1; 3787 } 3788 spin_lock_irq(&phba->hbalock); 3789 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 3790 psli->mbox_active = NULL; 3791 phba->link_flag &= ~LS_IGNORE_ERATT; 3792 spin_unlock_irq(&phba->hbalock); 3793 3794 lpfc_hba_down_post(phba); 3795 phba->link_state = LPFC_HBA_ERROR; 3796 3797 return ha_copy & HA_ERATT ? 0 : 1; 3798 } 3799 3800 /** 3801 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA 3802 * @phba: Pointer to HBA context object. 3803 * 3804 * This function resets the HBA by writing HC_INITFF to the control 3805 * register. After the HBA resets, this function resets all the iocb ring 3806 * indices. This function disables PCI layer parity checking during 3807 * the reset. 3808 * This function returns 0 always. 3809 * The caller is not required to hold any locks. 
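 *
 * Typical recovery sequence (illustrative; it mirrors the restart logic in
 * lpfc_sli_brdready_s3() above):
 *
 *	phba->pport->port_state = LPFC_VPORT_UNKNOWN;
 *	lpfc_sli_brdrestart(phba);
 *	lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);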
3810 **/ 3811 int 3812 lpfc_sli_brdreset(struct lpfc_hba *phba) 3813 { 3814 struct lpfc_sli *psli; 3815 struct lpfc_sli_ring *pring; 3816 uint16_t cfg_value; 3817 int i; 3818 3819 psli = &phba->sli; 3820 3821 /* Reset HBA */ 3822 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3823 "0325 Reset HBA Data: x%x x%x\n", 3824 phba->pport->port_state, psli->sli_flag); 3825 3826 /* perform board reset */ 3827 phba->fc_eventTag = 0; 3828 phba->link_events = 0; 3829 phba->pport->fc_myDID = 0; 3830 phba->pport->fc_prevDID = 0; 3831 3832 /* Turn off parity checking and serr during the physical reset */ 3833 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); 3834 pci_write_config_word(phba->pcidev, PCI_COMMAND, 3835 (cfg_value & 3836 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); 3837 3838 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA); 3839 3840 /* Now toggle INITFF bit in the Host Control Register */ 3841 writel(HC_INITFF, phba->HCregaddr); 3842 mdelay(1); 3843 readl(phba->HCregaddr); /* flush */ 3844 writel(0, phba->HCregaddr); 3845 readl(phba->HCregaddr); /* flush */ 3846 3847 /* Restore PCI cmd register */ 3848 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value); 3849 3850 /* Initialize relevant SLI info */ 3851 for (i = 0; i < psli->num_rings; i++) { 3852 pring = &psli->ring[i]; 3853 pring->flag = 0; 3854 pring->rspidx = 0; 3855 pring->next_cmdidx = 0; 3856 pring->local_getidx = 0; 3857 pring->cmdidx = 0; 3858 pring->missbufcnt = 0; 3859 } 3860 3861 phba->link_state = LPFC_WARM_START; 3862 return 0; 3863 } 3864 3865 /** 3866 * lpfc_sli4_brdreset - Reset a sli-4 HBA 3867 * @phba: Pointer to HBA context object. 3868 * 3869 * This function resets a SLI4 HBA. This function disables PCI layer parity 3870 * checking during resets the device. The caller is not required to hold 3871 * any locks. 3872 * 3873 * This function returns 0 always. 3874 **/ 3875 int 3876 lpfc_sli4_brdreset(struct lpfc_hba *phba) 3877 { 3878 struct lpfc_sli *psli = &phba->sli; 3879 uint16_t cfg_value; 3880 3881 /* Reset HBA */ 3882 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3883 "0295 Reset HBA Data: x%x x%x\n", 3884 phba->pport->port_state, psli->sli_flag); 3885 3886 /* perform board reset */ 3887 phba->fc_eventTag = 0; 3888 phba->link_events = 0; 3889 phba->pport->fc_myDID = 0; 3890 phba->pport->fc_prevDID = 0; 3891 3892 spin_lock_irq(&phba->hbalock); 3893 psli->sli_flag &= ~(LPFC_PROCESS_LA); 3894 phba->fcf.fcf_flag = 0; 3895 spin_unlock_irq(&phba->hbalock); 3896 3897 /* Now physically reset the device */ 3898 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3899 "0389 Performing PCI function reset!\n"); 3900 3901 /* Turn off parity checking and serr during the physical reset */ 3902 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); 3903 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value & 3904 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); 3905 3906 /* Perform FCoE PCI function reset */ 3907 lpfc_sli4_queue_destroy(phba); 3908 lpfc_pci_function_reset(phba); 3909 3910 /* Restore PCI cmd register */ 3911 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value); 3912 3913 return 0; 3914 } 3915 3916 /** 3917 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba 3918 * @phba: Pointer to HBA context object. 3919 * 3920 * This function is called in the SLI initialization code path to 3921 * restart the HBA. The caller is not required to hold any lock. 3922 * This function writes MBX_RESTART mailbox command to the SLIM and 3923 * resets the HBA. 
At the end of the function, it calls lpfc_hba_down_post 3924 * function to free any pending commands. The function enables 3925 * POST only during the first initialization. The function returns zero. 3926 * The function does not guarantee completion of MBX_RESTART mailbox 3927 * command before the return of this function. 3928 **/ 3929 static int 3930 lpfc_sli_brdrestart_s3(struct lpfc_hba *phba) 3931 { 3932 MAILBOX_t *mb; 3933 struct lpfc_sli *psli; 3934 volatile uint32_t word0; 3935 void __iomem *to_slim; 3936 uint32_t hba_aer_enabled; 3937 3938 spin_lock_irq(&phba->hbalock); 3939 3940 /* Take PCIe device Advanced Error Reporting (AER) state */ 3941 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED; 3942 3943 psli = &phba->sli; 3944 3945 /* Restart HBA */ 3946 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3947 "0337 Restart HBA Data: x%x x%x\n", 3948 phba->pport->port_state, psli->sli_flag); 3949 3950 word0 = 0; 3951 mb = (MAILBOX_t *) &word0; 3952 mb->mbxCommand = MBX_RESTART; 3953 mb->mbxHc = 1; 3954 3955 lpfc_reset_barrier(phba); 3956 3957 to_slim = phba->MBslimaddr; 3958 writel(*(uint32_t *) mb, to_slim); 3959 readl(to_slim); /* flush */ 3960 3961 /* Only skip post after fc_ffinit is completed */ 3962 if (phba->pport->port_state) 3963 word0 = 1; /* This is really setting up word1 */ 3964 else 3965 word0 = 0; /* This is really setting up word1 */ 3966 to_slim = phba->MBslimaddr + sizeof (uint32_t); 3967 writel(*(uint32_t *) mb, to_slim); 3968 readl(to_slim); /* flush */ 3969 3970 lpfc_sli_brdreset(phba); 3971 phba->pport->stopped = 0; 3972 phba->link_state = LPFC_INIT_START; 3973 phba->hba_flag = 0; 3974 spin_unlock_irq(&phba->hbalock); 3975 3976 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); 3977 psli->stats_start = get_seconds(); 3978 3979 /* Give the INITFF and Post time to settle. */ 3980 mdelay(100); 3981 3982 /* Reset HBA AER if it was enabled, note hba_flag was reset above */ 3983 if (hba_aer_enabled) 3984 pci_disable_pcie_error_reporting(phba->pcidev); 3985 3986 lpfc_hba_down_post(phba); 3987 3988 return 0; 3989 } 3990 3991 /** 3992 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba 3993 * @phba: Pointer to HBA context object. 3994 * 3995 * This function is called in the SLI initialization code path to restart 3996 * a SLI4 HBA. The caller is not required to hold any lock. 3997 * At the end of the function, it calls lpfc_hba_down_post function to 3998 * free any pending commands. 
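 *
 * Callers reach this routine through the SLI API jump table; the assignment
 * below is shown only for illustration (it is made during driver setup based
 * on the SLI revision):
 *
 *	phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
 *	...
 *	lpfc_sli_brdrestart(phba);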
3999 **/ 4000 static int 4001 lpfc_sli_brdrestart_s4(struct lpfc_hba *phba) 4002 { 4003 struct lpfc_sli *psli = &phba->sli; 4004 uint32_t hba_aer_enabled; 4005 4006 /* Restart HBA */ 4007 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4008 "0296 Restart HBA Data: x%x x%x\n", 4009 phba->pport->port_state, psli->sli_flag); 4010 4011 /* Take PCIe device Advanced Error Reporting (AER) state */ 4012 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED; 4013 4014 lpfc_sli4_brdreset(phba); 4015 4016 spin_lock_irq(&phba->hbalock); 4017 phba->pport->stopped = 0; 4018 phba->link_state = LPFC_INIT_START; 4019 phba->hba_flag = 0; 4020 spin_unlock_irq(&phba->hbalock); 4021 4022 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); 4023 psli->stats_start = get_seconds(); 4024 4025 /* Reset HBA AER if it was enabled, note hba_flag was reset above */ 4026 if (hba_aer_enabled) 4027 pci_disable_pcie_error_reporting(phba->pcidev); 4028 4029 lpfc_hba_down_post(phba); 4030 4031 return 0; 4032 } 4033 4034 /** 4035 * lpfc_sli_brdrestart - Wrapper func for restarting hba 4036 * @phba: Pointer to HBA context object. 4037 * 4038 * This routine wraps the actual SLI3 or SLI4 hba restart routine from the 4039 * API jump table function pointer from the lpfc_hba struct. 4040 **/ 4041 int 4042 lpfc_sli_brdrestart(struct lpfc_hba *phba) 4043 { 4044 return phba->lpfc_sli_brdrestart(phba); 4045 } 4046 4047 /** 4048 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart 4049 * @phba: Pointer to HBA context object. 4050 * 4051 * This function is called after a HBA restart to wait for successful 4052 * restart of the HBA. Successful restart of the HBA is indicated by 4053 * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15 4054 * iteration, the function will restart the HBA again. The function returns 4055 * zero if HBA successfully restarted else returns negative error code. 4056 **/ 4057 static int 4058 lpfc_sli_chipset_init(struct lpfc_hba *phba) 4059 { 4060 uint32_t status, i = 0; 4061 4062 /* Read the HBA Host Status Register */ 4063 if (lpfc_readl(phba->HSregaddr, &status)) 4064 return -EIO; 4065 4066 /* Check status register to see what current state is */ 4067 i = 0; 4068 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) { 4069 4070 /* Check every 10ms for 10 retries, then every 100ms for 90 4071 * retries, then every 1 sec for 50 retires for a total of 4072 * ~60 seconds before reset the board again and check every 4073 * 1 sec for 50 retries. The up to 60 seconds before the 4074 * board ready is required by the Falcon FIPS zeroization 4075 * complete, and any reset the board in between shall cause 4076 * restart of zeroization, further delay the board ready. 
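 * In round numbers, 10 * 10ms + 90 * 100ms + 50 * 1s is just under 60
 * seconds before the restart at iteration 150 below, after which up to 50
 * more one-second polls are made before the 200-iteration timeout.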
4077 */ 4078 if (i++ >= 200) { 4079 /* Adapter failed to init, timeout, status reg 4080 <status> */ 4081 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4082 "0436 Adapter failed to init, " 4083 "timeout, status reg x%x, " 4084 "FW Data: A8 x%x AC x%x\n", status, 4085 readl(phba->MBslimaddr + 0xa8), 4086 readl(phba->MBslimaddr + 0xac)); 4087 phba->link_state = LPFC_HBA_ERROR; 4088 return -ETIMEDOUT; 4089 } 4090 4091 /* Check to see if any errors occurred during init */ 4092 if (status & HS_FFERM) { 4093 /* ERROR: During chipset initialization */ 4094 /* Adapter failed to init, chipset, status reg 4095 <status> */ 4096 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4097 "0437 Adapter failed to init, " 4098 "chipset, status reg x%x, " 4099 "FW Data: A8 x%x AC x%x\n", status, 4100 readl(phba->MBslimaddr + 0xa8), 4101 readl(phba->MBslimaddr + 0xac)); 4102 phba->link_state = LPFC_HBA_ERROR; 4103 return -EIO; 4104 } 4105 4106 if (i <= 10) 4107 msleep(10); 4108 else if (i <= 100) 4109 msleep(100); 4110 else 4111 msleep(1000); 4112 4113 if (i == 150) { 4114 /* Do post */ 4115 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 4116 lpfc_sli_brdrestart(phba); 4117 } 4118 /* Read the HBA Host Status Register */ 4119 if (lpfc_readl(phba->HSregaddr, &status)) 4120 return -EIO; 4121 } 4122 4123 /* Check to see if any errors occurred during init */ 4124 if (status & HS_FFERM) { 4125 /* ERROR: During chipset initialization */ 4126 /* Adapter failed to init, chipset, status reg <status> */ 4127 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4128 "0438 Adapter failed to init, chipset, " 4129 "status reg x%x, " 4130 "FW Data: A8 x%x AC x%x\n", status, 4131 readl(phba->MBslimaddr + 0xa8), 4132 readl(phba->MBslimaddr + 0xac)); 4133 phba->link_state = LPFC_HBA_ERROR; 4134 return -EIO; 4135 } 4136 4137 /* Clear all interrupt enable conditions */ 4138 writel(0, phba->HCregaddr); 4139 readl(phba->HCregaddr); /* flush */ 4140 4141 /* setup host attn register */ 4142 writel(0xffffffff, phba->HAregaddr); 4143 readl(phba->HAregaddr); /* flush */ 4144 return 0; 4145 } 4146 4147 /** 4148 * lpfc_sli_hbq_count - Get the number of HBQs to be configured 4149 * 4150 * This function calculates and returns the number of HBQs required to be 4151 * configured. 4152 **/ 4153 int 4154 lpfc_sli_hbq_count(void) 4155 { 4156 return ARRAY_SIZE(lpfc_hbq_defs); 4157 } 4158 4159 /** 4160 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries 4161 * 4162 * This function adds the number of hbq entries in every HBQ to get 4163 * the total number of hbq entries required for the HBA and returns 4164 * the total count. 4165 **/ 4166 static int 4167 lpfc_sli_hbq_entry_count(void) 4168 { 4169 int hbq_count = lpfc_sli_hbq_count(); 4170 int count = 0; 4171 int i; 4172 4173 for (i = 0; i < hbq_count; ++i) 4174 count += lpfc_hbq_defs[i]->entry_count; 4175 return count; 4176 } 4177 4178 /** 4179 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries 4180 * 4181 * This function calculates amount of memory required for all hbq entries 4182 * to be configured and returns the total memory required. 4183 **/ 4184 int 4185 lpfc_sli_hbq_size(void) 4186 { 4187 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry); 4188 } 4189 4190 /** 4191 * lpfc_sli_hbq_setup - configure and initialize HBQs 4192 * @phba: Pointer to HBA context object. 4193 * 4194 * This function is called during the SLI initialization to configure 4195 * all the HBQs and post buffers to the HBQ. The caller is not 4196 * required to hold any locks. 
This function will return zero if successful 4197 * else it will return negative error code. 4198 **/ 4199 static int 4200 lpfc_sli_hbq_setup(struct lpfc_hba *phba) 4201 { 4202 int hbq_count = lpfc_sli_hbq_count(); 4203 LPFC_MBOXQ_t *pmb; 4204 MAILBOX_t *pmbox; 4205 uint32_t hbqno; 4206 uint32_t hbq_entry_index; 4207 4208 /* Get a Mailbox buffer to setup mailbox 4209 * commands for HBA initialization 4210 */ 4211 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4212 4213 if (!pmb) 4214 return -ENOMEM; 4215 4216 pmbox = &pmb->u.mb; 4217 4218 /* Initialize the struct lpfc_sli_hbq structure for each hbq */ 4219 phba->link_state = LPFC_INIT_MBX_CMDS; 4220 phba->hbq_in_use = 1; 4221 4222 hbq_entry_index = 0; 4223 for (hbqno = 0; hbqno < hbq_count; ++hbqno) { 4224 phba->hbqs[hbqno].next_hbqPutIdx = 0; 4225 phba->hbqs[hbqno].hbqPutIdx = 0; 4226 phba->hbqs[hbqno].local_hbqGetIdx = 0; 4227 phba->hbqs[hbqno].entry_count = 4228 lpfc_hbq_defs[hbqno]->entry_count; 4229 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno], 4230 hbq_entry_index, pmb); 4231 hbq_entry_index += phba->hbqs[hbqno].entry_count; 4232 4233 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 4234 /* Adapter failed to init, mbxCmd <cmd> CFG_RING, 4235 mbxStatus <status>, ring <num> */ 4236 4237 lpfc_printf_log(phba, KERN_ERR, 4238 LOG_SLI | LOG_VPORT, 4239 "1805 Adapter failed to init. " 4240 "Data: x%x x%x x%x\n", 4241 pmbox->mbxCommand, 4242 pmbox->mbxStatus, hbqno); 4243 4244 phba->link_state = LPFC_HBA_ERROR; 4245 mempool_free(pmb, phba->mbox_mem_pool); 4246 return -ENXIO; 4247 } 4248 } 4249 phba->hbq_count = hbq_count; 4250 4251 mempool_free(pmb, phba->mbox_mem_pool); 4252 4253 /* Initially populate or replenish the HBQs */ 4254 for (hbqno = 0; hbqno < hbq_count; ++hbqno) 4255 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno); 4256 return 0; 4257 } 4258 4259 /** 4260 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA 4261 * @phba: Pointer to HBA context object. 4262 * 4263 * This function is called during the SLI initialization to configure 4264 * all the HBQs and post buffers to the HBQ. The caller is not 4265 * required to hold any locks. This function will return zero if successful 4266 * else it will return negative error code. 4267 **/ 4268 static int 4269 lpfc_sli4_rb_setup(struct lpfc_hba *phba) 4270 { 4271 phba->hbq_in_use = 1; 4272 phba->hbqs[0].entry_count = lpfc_hbq_defs[0]->entry_count; 4273 phba->hbq_count = 1; 4274 /* Initially populate or replenish the HBQs */ 4275 lpfc_sli_hbqbuf_init_hbqs(phba, 0); 4276 return 0; 4277 } 4278 4279 /** 4280 * lpfc_sli_config_port - Issue config port mailbox command 4281 * @phba: Pointer to HBA context object. 4282 * @sli_mode: sli mode - 2/3 4283 * 4284 * This function is called by the sli intialization code path 4285 * to issue config_port mailbox command. This function restarts the 4286 * HBA firmware and issues a config_port mailbox command to configure 4287 * the SLI interface in the sli mode specified by sli_mode 4288 * variable. The caller is not required to hold any locks. 4289 * The function returns 0 if successful, else returns negative error 4290 * code. 
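 *
 * Illustrative call from the setup path (this mirrors lpfc_sli_hba_setup()
 * further below):
 *
 *	rc = lpfc_sli_config_port(phba, 3);
 *	if (rc)
 *		rc = lpfc_sli_config_port(phba, 2);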
4291 **/ 4292 int 4293 lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode) 4294 { 4295 LPFC_MBOXQ_t *pmb; 4296 uint32_t resetcount = 0, rc = 0, done = 0; 4297 4298 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4299 if (!pmb) { 4300 phba->link_state = LPFC_HBA_ERROR; 4301 return -ENOMEM; 4302 } 4303 4304 phba->sli_rev = sli_mode; 4305 while (resetcount < 2 && !done) { 4306 spin_lock_irq(&phba->hbalock); 4307 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE; 4308 spin_unlock_irq(&phba->hbalock); 4309 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 4310 lpfc_sli_brdrestart(phba); 4311 rc = lpfc_sli_chipset_init(phba); 4312 if (rc) 4313 break; 4314 4315 spin_lock_irq(&phba->hbalock); 4316 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 4317 spin_unlock_irq(&phba->hbalock); 4318 resetcount++; 4319 4320 /* Call pre CONFIG_PORT mailbox command initialization. A 4321 * value of 0 means the call was successful. Any other 4322 * nonzero value is a failure, but if ERESTART is returned, 4323 * the driver may reset the HBA and try again. 4324 */ 4325 rc = lpfc_config_port_prep(phba); 4326 if (rc == -ERESTART) { 4327 phba->link_state = LPFC_LINK_UNKNOWN; 4328 continue; 4329 } else if (rc) 4330 break; 4331 4332 phba->link_state = LPFC_INIT_MBX_CMDS; 4333 lpfc_config_port(phba, pmb); 4334 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 4335 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED | 4336 LPFC_SLI3_HBQ_ENABLED | 4337 LPFC_SLI3_CRP_ENABLED | 4338 LPFC_SLI3_BG_ENABLED | 4339 LPFC_SLI3_DSS_ENABLED); 4340 if (rc != MBX_SUCCESS) { 4341 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4342 "0442 Adapter failed to init, mbxCmd x%x " 4343 "CONFIG_PORT, mbxStatus x%x Data: x%x\n", 4344 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0); 4345 spin_lock_irq(&phba->hbalock); 4346 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE; 4347 spin_unlock_irq(&phba->hbalock); 4348 rc = -ENXIO; 4349 } else { 4350 /* Allow asynchronous mailbox command to go through */ 4351 spin_lock_irq(&phba->hbalock); 4352 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 4353 spin_unlock_irq(&phba->hbalock); 4354 done = 1; 4355 4356 if ((pmb->u.mb.un.varCfgPort.casabt == 1) && 4357 (pmb->u.mb.un.varCfgPort.gasabt == 0)) 4358 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 4359 "3110 Port did not grant ASABT\n"); 4360 } 4361 } 4362 if (!done) { 4363 rc = -EINVAL; 4364 goto do_prep_failed; 4365 } 4366 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) { 4367 if (!pmb->u.mb.un.varCfgPort.cMA) { 4368 rc = -ENXIO; 4369 goto do_prep_failed; 4370 } 4371 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) { 4372 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED; 4373 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi; 4374 phba->max_vports = (phba->max_vpi > phba->max_vports) ? 4375 phba->max_vpi : phba->max_vports; 4376 4377 } else 4378 phba->max_vpi = 0; 4379 phba->fips_level = 0; 4380 phba->fips_spec_rev = 0; 4381 if (pmb->u.mb.un.varCfgPort.gdss) { 4382 phba->sli3_options |= LPFC_SLI3_DSS_ENABLED; 4383 phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level; 4384 phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev; 4385 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4386 "2850 Security Crypto Active. 
FIPS x%d " 4387 "(Spec Rev: x%d)", 4388 phba->fips_level, phba->fips_spec_rev); 4389 } 4390 if (pmb->u.mb.un.varCfgPort.sec_err) { 4391 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4392 "2856 Config Port Security Crypto " 4393 "Error: x%x ", 4394 pmb->u.mb.un.varCfgPort.sec_err); 4395 } 4396 if (pmb->u.mb.un.varCfgPort.gerbm) 4397 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED; 4398 if (pmb->u.mb.un.varCfgPort.gcrp) 4399 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED; 4400 4401 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get; 4402 phba->port_gp = phba->mbox->us.s3_pgp.port; 4403 4404 if (phba->cfg_enable_bg) { 4405 if (pmb->u.mb.un.varCfgPort.gbg) 4406 phba->sli3_options |= LPFC_SLI3_BG_ENABLED; 4407 else 4408 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4409 "0443 Adapter did not grant " 4410 "BlockGuard\n"); 4411 } 4412 } else { 4413 phba->hbq_get = NULL; 4414 phba->port_gp = phba->mbox->us.s2.port; 4415 phba->max_vpi = 0; 4416 } 4417 do_prep_failed: 4418 mempool_free(pmb, phba->mbox_mem_pool); 4419 return rc; 4420 } 4421 4422 4423 /** 4424 * lpfc_sli_hba_setup - SLI intialization function 4425 * @phba: Pointer to HBA context object. 4426 * 4427 * This function is the main SLI intialization function. This function 4428 * is called by the HBA intialization code, HBA reset code and HBA 4429 * error attention handler code. Caller is not required to hold any 4430 * locks. This function issues config_port mailbox command to configure 4431 * the SLI, setup iocb rings and HBQ rings. In the end the function 4432 * calls the config_port_post function to issue init_link mailbox 4433 * command and to start the discovery. The function will return zero 4434 * if successful, else it will return negative error code. 4435 **/ 4436 int 4437 lpfc_sli_hba_setup(struct lpfc_hba *phba) 4438 { 4439 uint32_t rc; 4440 int mode = 3, i; 4441 int longs; 4442 4443 switch (lpfc_sli_mode) { 4444 case 2: 4445 if (phba->cfg_enable_npiv) { 4446 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 4447 "1824 NPIV enabled: Override lpfc_sli_mode " 4448 "parameter (%d) to auto (0).\n", 4449 lpfc_sli_mode); 4450 break; 4451 } 4452 mode = 2; 4453 break; 4454 case 0: 4455 case 3: 4456 break; 4457 default: 4458 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 4459 "1819 Unrecognized lpfc_sli_mode " 4460 "parameter: %d.\n", lpfc_sli_mode); 4461 4462 break; 4463 } 4464 4465 rc = lpfc_sli_config_port(phba, mode); 4466 4467 if (rc && lpfc_sli_mode == 3) 4468 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 4469 "1820 Unable to select SLI-3. 
" 4470 "Not supported by adapter.\n"); 4471 if (rc && mode != 2) 4472 rc = lpfc_sli_config_port(phba, 2); 4473 if (rc) 4474 goto lpfc_sli_hba_setup_error; 4475 4476 /* Enable PCIe device Advanced Error Reporting (AER) if configured */ 4477 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) { 4478 rc = pci_enable_pcie_error_reporting(phba->pcidev); 4479 if (!rc) { 4480 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4481 "2709 This device supports " 4482 "Advanced Error Reporting (AER)\n"); 4483 spin_lock_irq(&phba->hbalock); 4484 phba->hba_flag |= HBA_AER_ENABLED; 4485 spin_unlock_irq(&phba->hbalock); 4486 } else { 4487 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4488 "2708 This device does not support " 4489 "Advanced Error Reporting (AER)\n"); 4490 phba->cfg_aer_support = 0; 4491 } 4492 } 4493 4494 if (phba->sli_rev == 3) { 4495 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE; 4496 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE; 4497 } else { 4498 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE; 4499 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE; 4500 phba->sli3_options = 0; 4501 } 4502 4503 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4504 "0444 Firmware in SLI %x mode. Max_vpi %d\n", 4505 phba->sli_rev, phba->max_vpi); 4506 rc = lpfc_sli_ring_map(phba); 4507 4508 if (rc) 4509 goto lpfc_sli_hba_setup_error; 4510 4511 /* Initialize VPIs. */ 4512 if (phba->sli_rev == LPFC_SLI_REV3) { 4513 /* 4514 * The VPI bitmask and physical ID array are allocated 4515 * and initialized once only - at driver load. A port 4516 * reset doesn't need to reinitialize this memory. 4517 */ 4518 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) { 4519 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG; 4520 phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long), 4521 GFP_KERNEL); 4522 if (!phba->vpi_bmask) { 4523 rc = -ENOMEM; 4524 goto lpfc_sli_hba_setup_error; 4525 } 4526 4527 phba->vpi_ids = kzalloc( 4528 (phba->max_vpi+1) * sizeof(uint16_t), 4529 GFP_KERNEL); 4530 if (!phba->vpi_ids) { 4531 kfree(phba->vpi_bmask); 4532 rc = -ENOMEM; 4533 goto lpfc_sli_hba_setup_error; 4534 } 4535 for (i = 0; i < phba->max_vpi; i++) 4536 phba->vpi_ids[i] = i; 4537 } 4538 } 4539 4540 /* Init HBQs */ 4541 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 4542 rc = lpfc_sli_hbq_setup(phba); 4543 if (rc) 4544 goto lpfc_sli_hba_setup_error; 4545 } 4546 spin_lock_irq(&phba->hbalock); 4547 phba->sli.sli_flag |= LPFC_PROCESS_LA; 4548 spin_unlock_irq(&phba->hbalock); 4549 4550 rc = lpfc_config_port_post(phba); 4551 if (rc) 4552 goto lpfc_sli_hba_setup_error; 4553 4554 return rc; 4555 4556 lpfc_sli_hba_setup_error: 4557 phba->link_state = LPFC_HBA_ERROR; 4558 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4559 "0445 Firmware initialization failed\n"); 4560 return rc; 4561 } 4562 4563 /** 4564 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region 4565 * @phba: Pointer to HBA context object. 4566 * @mboxq: mailbox pointer. 4567 * This function issue a dump mailbox command to read config region 4568 * 23 and parse the records in the region and populate driver 4569 * data structure. 
4570 **/ 4571 static int 4572 lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba) 4573 { 4574 LPFC_MBOXQ_t *mboxq; 4575 struct lpfc_dmabuf *mp; 4576 struct lpfc_mqe *mqe; 4577 uint32_t data_length; 4578 int rc; 4579 4580 /* Program the default value of vlan_id and fc_map */ 4581 phba->valid_vlan = 0; 4582 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; 4583 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; 4584 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; 4585 4586 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4587 if (!mboxq) 4588 return -ENOMEM; 4589 4590 mqe = &mboxq->u.mqe; 4591 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) { 4592 rc = -ENOMEM; 4593 goto out_free_mboxq; 4594 } 4595 4596 mp = (struct lpfc_dmabuf *) mboxq->context1; 4597 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4598 4599 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 4600 "(%d):2571 Mailbox cmd x%x Status x%x " 4601 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x " 4602 "x%x x%x x%x x%x x%x x%x x%x x%x x%x " 4603 "CQ: x%x x%x x%x x%x\n", 4604 mboxq->vport ? mboxq->vport->vpi : 0, 4605 bf_get(lpfc_mqe_command, mqe), 4606 bf_get(lpfc_mqe_status, mqe), 4607 mqe->un.mb_words[0], mqe->un.mb_words[1], 4608 mqe->un.mb_words[2], mqe->un.mb_words[3], 4609 mqe->un.mb_words[4], mqe->un.mb_words[5], 4610 mqe->un.mb_words[6], mqe->un.mb_words[7], 4611 mqe->un.mb_words[8], mqe->un.mb_words[9], 4612 mqe->un.mb_words[10], mqe->un.mb_words[11], 4613 mqe->un.mb_words[12], mqe->un.mb_words[13], 4614 mqe->un.mb_words[14], mqe->un.mb_words[15], 4615 mqe->un.mb_words[16], mqe->un.mb_words[50], 4616 mboxq->mcqe.word0, 4617 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, 4618 mboxq->mcqe.trailer); 4619 4620 if (rc) { 4621 lpfc_mbuf_free(phba, mp->virt, mp->phys); 4622 kfree(mp); 4623 rc = -EIO; 4624 goto out_free_mboxq; 4625 } 4626 data_length = mqe->un.mb_words[5]; 4627 if (data_length > DMP_RGN23_SIZE) { 4628 lpfc_mbuf_free(phba, mp->virt, mp->phys); 4629 kfree(mp); 4630 rc = -EIO; 4631 goto out_free_mboxq; 4632 } 4633 4634 lpfc_parse_fcoe_conf(phba, mp->virt, data_length); 4635 lpfc_mbuf_free(phba, mp->virt, mp->phys); 4636 kfree(mp); 4637 rc = 0; 4638 4639 out_free_mboxq: 4640 mempool_free(mboxq, phba->mbox_mem_pool); 4641 return rc; 4642 } 4643 4644 /** 4645 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data 4646 * @phba: pointer to lpfc hba data structure. 4647 * @mboxq: pointer to the LPFC_MBOXQ_t structure. 4648 * @vpd: pointer to the memory to hold resulting port vpd data. 4649 * @vpd_size: On input, the number of bytes allocated to @vpd. 4650 * On output, the number of data bytes in @vpd. 4651 * 4652 * This routine executes a READ_REV SLI4 mailbox command. In 4653 * addition, this routine gets the port vpd data. 4654 * 4655 * Return codes 4656 * 0 - successful 4657 * -ENOMEM - could not allocated memory. 4658 **/ 4659 static int 4660 lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, 4661 uint8_t *vpd, uint32_t *vpd_size) 4662 { 4663 int rc = 0; 4664 uint32_t dma_size; 4665 struct lpfc_dmabuf *dmabuf; 4666 struct lpfc_mqe *mqe; 4667 4668 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 4669 if (!dmabuf) 4670 return -ENOMEM; 4671 4672 /* 4673 * Get a DMA buffer for the vpd data resulting from the READ_REV 4674 * mailbox command. 
4675 */ 4676 dma_size = *vpd_size; 4677 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 4678 dma_size, 4679 &dmabuf->phys, 4680 GFP_KERNEL); 4681 if (!dmabuf->virt) { 4682 kfree(dmabuf); 4683 return -ENOMEM; 4684 } 4685 memset(dmabuf->virt, 0, dma_size); 4686 4687 /* 4688 * The SLI4 implementation of READ_REV conflicts at word1, 4689 * bits 31:16 and SLI4 adds vpd functionality not present 4690 * in SLI3. This code corrects the conflicts. 4691 */ 4692 lpfc_read_rev(phba, mboxq); 4693 mqe = &mboxq->u.mqe; 4694 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys); 4695 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys); 4696 mqe->un.read_rev.word1 &= 0x0000FFFF; 4697 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1); 4698 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size); 4699 4700 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4701 if (rc) { 4702 dma_free_coherent(&phba->pcidev->dev, dma_size, 4703 dmabuf->virt, dmabuf->phys); 4704 kfree(dmabuf); 4705 return -EIO; 4706 } 4707 4708 /* 4709 * The available vpd length cannot be bigger than the 4710 * DMA buffer passed to the port. Catch the less than 4711 * case and update the caller's size. 4712 */ 4713 if (mqe->un.read_rev.avail_vpd_len < *vpd_size) 4714 *vpd_size = mqe->un.read_rev.avail_vpd_len; 4715 4716 memcpy(vpd, dmabuf->virt, *vpd_size); 4717 4718 dma_free_coherent(&phba->pcidev->dev, dma_size, 4719 dmabuf->virt, dmabuf->phys); 4720 kfree(dmabuf); 4721 return 0; 4722 } 4723 4724 /** 4725 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name 4726 * @phba: pointer to lpfc hba data structure. 4727 * 4728 * This routine retrieves SLI4 device physical port name this PCI function 4729 * is attached to. 4730 * 4731 * Return codes 4732 * 0 - sucessful 4733 * otherwise - failed to retrieve physical port name 4734 **/ 4735 static int 4736 lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba) 4737 { 4738 LPFC_MBOXQ_t *mboxq; 4739 struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr; 4740 struct lpfc_controller_attribute *cntl_attr; 4741 struct lpfc_mbx_get_port_name *get_port_name; 4742 void *virtaddr = NULL; 4743 uint32_t alloclen, reqlen; 4744 uint32_t shdr_status, shdr_add_status; 4745 union lpfc_sli4_cfg_shdr *shdr; 4746 char cport_name = 0; 4747 int rc; 4748 4749 /* We assume nothing at this point */ 4750 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL; 4751 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON; 4752 4753 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4754 if (!mboxq) 4755 return -ENOMEM; 4756 /* obtain link type and link number via READ_CONFIG */ 4757 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL; 4758 lpfc_sli4_read_config(phba); 4759 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL) 4760 goto retrieve_ppname; 4761 4762 /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */ 4763 reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes); 4764 alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 4765 LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen, 4766 LPFC_SLI4_MBX_NEMBED); 4767 if (alloclen < reqlen) { 4768 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4769 "3084 Allocated DMA memory size (%d) is " 4770 "less than the requested DMA memory size " 4771 "(%d)\n", alloclen, reqlen); 4772 rc = -ENOMEM; 4773 goto out_free_mboxq; 4774 } 4775 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4776 virtaddr = mboxq->sge_array->addr[0]; 4777 mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr; 4778 shdr = 
&mbx_cntl_attr->cfg_shdr; 4779 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 4780 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 4781 if (shdr_status || shdr_add_status || rc) { 4782 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 4783 "3085 Mailbox x%x (x%x/x%x) failed, " 4784 "rc:x%x, status:x%x, add_status:x%x\n", 4785 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 4786 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 4787 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 4788 rc, shdr_status, shdr_add_status); 4789 rc = -ENXIO; 4790 goto out_free_mboxq; 4791 } 4792 cntl_attr = &mbx_cntl_attr->cntl_attr; 4793 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL; 4794 phba->sli4_hba.lnk_info.lnk_tp = 4795 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr); 4796 phba->sli4_hba.lnk_info.lnk_no = 4797 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr); 4798 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4799 "3086 lnk_type:%d, lnk_numb:%d\n", 4800 phba->sli4_hba.lnk_info.lnk_tp, 4801 phba->sli4_hba.lnk_info.lnk_no); 4802 4803 retrieve_ppname: 4804 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 4805 LPFC_MBOX_OPCODE_GET_PORT_NAME, 4806 sizeof(struct lpfc_mbx_get_port_name) - 4807 sizeof(struct lpfc_sli4_cfg_mhdr), 4808 LPFC_SLI4_MBX_EMBED); 4809 get_port_name = &mboxq->u.mqe.un.get_port_name; 4810 shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr; 4811 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1); 4812 bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request, 4813 phba->sli4_hba.lnk_info.lnk_tp); 4814 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4815 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 4816 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 4817 if (shdr_status || shdr_add_status || rc) { 4818 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 4819 "3087 Mailbox x%x (x%x/x%x) failed: " 4820 "rc:x%x, status:x%x, add_status:x%x\n", 4821 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 4822 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 4823 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 4824 rc, shdr_status, shdr_add_status); 4825 rc = -ENXIO; 4826 goto out_free_mboxq; 4827 } 4828 switch (phba->sli4_hba.lnk_info.lnk_no) { 4829 case LPFC_LINK_NUMBER_0: 4830 cport_name = bf_get(lpfc_mbx_get_port_name_name0, 4831 &get_port_name->u.response); 4832 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 4833 break; 4834 case LPFC_LINK_NUMBER_1: 4835 cport_name = bf_get(lpfc_mbx_get_port_name_name1, 4836 &get_port_name->u.response); 4837 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 4838 break; 4839 case LPFC_LINK_NUMBER_2: 4840 cport_name = bf_get(lpfc_mbx_get_port_name_name2, 4841 &get_port_name->u.response); 4842 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 4843 break; 4844 case LPFC_LINK_NUMBER_3: 4845 cport_name = bf_get(lpfc_mbx_get_port_name_name3, 4846 &get_port_name->u.response); 4847 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 4848 break; 4849 default: 4850 break; 4851 } 4852 4853 if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) { 4854 phba->Port[0] = cport_name; 4855 phba->Port[1] = '\0'; 4856 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4857 "3091 SLI get port name: %s\n", phba->Port); 4858 } 4859 4860 out_free_mboxq: 4861 if (rc != MBX_TIMEOUT) { 4862 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG) 4863 lpfc_sli4_mbox_cmd_free(phba, mboxq); 4864 else 4865 mempool_free(mboxq, phba->mbox_mem_pool); 4866 } 4867 return rc; 4868 } 4869 4870 /** 4871 * 
lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues 4872 * @phba: pointer to lpfc hba data structure. 4873 * 4874 * This routine is called to explicitly arm the SLI4 device's completion and 4875 * event queues 4876 **/ 4877 static void 4878 lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba) 4879 { 4880 uint8_t fcp_eqidx; 4881 4882 lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM); 4883 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM); 4884 fcp_eqidx = 0; 4885 if (phba->sli4_hba.fcp_cq) { 4886 do 4887 lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx], 4888 LPFC_QUEUE_REARM); 4889 while (++fcp_eqidx < phba->cfg_fcp_eq_count); 4890 } 4891 lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM); 4892 if (phba->sli4_hba.fp_eq) { 4893 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; 4894 fcp_eqidx++) 4895 lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx], 4896 LPFC_QUEUE_REARM); 4897 } 4898 } 4899 4900 /** 4901 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count. 4902 * @phba: Pointer to HBA context object. 4903 * @type: The resource extent type. 4904 * @extnt_count: buffer to hold port available extent count. 4905 * @extnt_size: buffer to hold element count per extent. 4906 * 4907 * This function calls the port and retrievs the number of available 4908 * extents and their size for a particular extent type. 4909 * 4910 * Returns: 0 if successful. Nonzero otherwise. 4911 **/ 4912 int 4913 lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type, 4914 uint16_t *extnt_count, uint16_t *extnt_size) 4915 { 4916 int rc = 0; 4917 uint32_t length; 4918 uint32_t mbox_tmo; 4919 struct lpfc_mbx_get_rsrc_extent_info *rsrc_info; 4920 LPFC_MBOXQ_t *mbox; 4921 4922 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4923 if (!mbox) 4924 return -ENOMEM; 4925 4926 /* Find out how many extents are available for this resource type */ 4927 length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) - 4928 sizeof(struct lpfc_sli4_cfg_mhdr)); 4929 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 4930 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO, 4931 length, LPFC_SLI4_MBX_EMBED); 4932 4933 /* Send an extents count of 0 - the GET doesn't use it. */ 4934 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type, 4935 LPFC_SLI4_MBX_EMBED); 4936 if (unlikely(rc)) { 4937 rc = -EIO; 4938 goto err_exit; 4939 } 4940 4941 if (!phba->sli4_hba.intr_enable) 4942 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 4943 else { 4944 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 4945 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 4946 } 4947 if (unlikely(rc)) { 4948 rc = -EIO; 4949 goto err_exit; 4950 } 4951 4952 rsrc_info = &mbox->u.mqe.un.rsrc_extent_info; 4953 if (bf_get(lpfc_mbox_hdr_status, 4954 &rsrc_info->header.cfg_shdr.response)) { 4955 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 4956 "2930 Failed to get resource extents " 4957 "Status 0x%x Add'l Status 0x%x\n", 4958 bf_get(lpfc_mbox_hdr_status, 4959 &rsrc_info->header.cfg_shdr.response), 4960 bf_get(lpfc_mbox_hdr_add_status, 4961 &rsrc_info->header.cfg_shdr.response)); 4962 rc = -EIO; 4963 goto err_exit; 4964 } 4965 4966 *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt, 4967 &rsrc_info->u.rsp); 4968 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size, 4969 &rsrc_info->u.rsp); 4970 err_exit: 4971 mempool_free(mbox, phba->mbox_mem_pool); 4972 return rc; 4973 } 4974 4975 /** 4976 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents. 
4977 * @phba: Pointer to HBA context object. 4978 * @type: The extent type to check. 4979 * 4980 * This function reads the current available extents from the port and checks 4981 * if the extent count or extent size has changed since the last access. 4982 * Callers use this routine post port reset to understand if there is a 4983 * extent reprovisioning requirement. 4984 * 4985 * Returns: 4986 * -Error: error indicates problem. 4987 * 1: Extent count or size has changed. 4988 * 0: No changes. 4989 **/ 4990 static int 4991 lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type) 4992 { 4993 uint16_t curr_ext_cnt, rsrc_ext_cnt; 4994 uint16_t size_diff, rsrc_ext_size; 4995 int rc = 0; 4996 struct lpfc_rsrc_blks *rsrc_entry; 4997 struct list_head *rsrc_blk_list = NULL; 4998 4999 size_diff = 0; 5000 curr_ext_cnt = 0; 5001 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type, 5002 &rsrc_ext_cnt, 5003 &rsrc_ext_size); 5004 if (unlikely(rc)) 5005 return -EIO; 5006 5007 switch (type) { 5008 case LPFC_RSC_TYPE_FCOE_RPI: 5009 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list; 5010 break; 5011 case LPFC_RSC_TYPE_FCOE_VPI: 5012 rsrc_blk_list = &phba->lpfc_vpi_blk_list; 5013 break; 5014 case LPFC_RSC_TYPE_FCOE_XRI: 5015 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list; 5016 break; 5017 case LPFC_RSC_TYPE_FCOE_VFI: 5018 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list; 5019 break; 5020 default: 5021 break; 5022 } 5023 5024 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) { 5025 curr_ext_cnt++; 5026 if (rsrc_entry->rsrc_size != rsrc_ext_size) 5027 size_diff++; 5028 } 5029 5030 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0) 5031 rc = 1; 5032 5033 return rc; 5034 } 5035 5036 /** 5037 * lpfc_sli4_cfg_post_extnts - 5038 * @phba: Pointer to HBA context object. 5039 * @extnt_cnt - number of available extents. 5040 * @type - the extent type (rpi, xri, vfi, vpi). 5041 * @emb - buffer to hold either MBX_EMBED or MBX_NEMBED operation. 5042 * @mbox - pointer to the caller's allocated mailbox structure. 5043 * 5044 * This function executes the extents allocation request. It also 5045 * takes care of the amount of memory needed to allocate or get the 5046 * allocated extents. It is the caller's responsibility to evaluate 5047 * the response. 5048 * 5049 * Returns: 5050 * -Error: Error value describes the condition found. 5051 * 0: if successful 5052 **/ 5053 static int 5054 lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t *extnt_cnt, 5055 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox) 5056 { 5057 int rc = 0; 5058 uint32_t req_len; 5059 uint32_t emb_len; 5060 uint32_t alloc_len, mbox_tmo; 5061 5062 /* Calculate the total requested length of the dma memory */ 5063 req_len = *extnt_cnt * sizeof(uint16_t); 5064 5065 /* 5066 * Calculate the size of an embedded mailbox. The uint32_t 5067 * accounts for extents-specific word. 5068 */ 5069 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) - 5070 sizeof(uint32_t); 5071 5072 /* 5073 * Presume the allocation and response will fit into an embedded 5074 * mailbox. If not true, reconfigure to a non-embedded mailbox. 
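 * In other words, the request stays embedded only while
 * extnt_cnt * sizeof(uint16_t) fits within
 * sizeof(MAILBOX_t) - sizeof(struct mbox_header) - sizeof(uint32_t);
 * otherwise the non-embedded form is used and the request length grows to
 * also cover the cfg_shdr and the extents-specific word.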
5075 */ 5076 *emb = LPFC_SLI4_MBX_EMBED; 5077 if (req_len > emb_len) { 5078 req_len = *extnt_cnt * sizeof(uint16_t) + 5079 sizeof(union lpfc_sli4_cfg_shdr) + 5080 sizeof(uint32_t); 5081 *emb = LPFC_SLI4_MBX_NEMBED; 5082 } 5083 5084 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 5085 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT, 5086 req_len, *emb); 5087 if (alloc_len < req_len) { 5088 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5089 "2982 Allocated DMA memory size (x%x) is " 5090 "less than the requested DMA memory " 5091 "size (x%x)\n", alloc_len, req_len); 5092 return -ENOMEM; 5093 } 5094 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, *extnt_cnt, type, *emb); 5095 if (unlikely(rc)) 5096 return -EIO; 5097 5098 if (!phba->sli4_hba.intr_enable) 5099 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 5100 else { 5101 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 5102 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 5103 } 5104 5105 if (unlikely(rc)) 5106 rc = -EIO; 5107 return rc; 5108 } 5109 5110 /** 5111 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent. 5112 * @phba: Pointer to HBA context object. 5113 * @type: The resource extent type to allocate. 5114 * 5115 * This function allocates the number of elements for the specified 5116 * resource type. 5117 **/ 5118 static int 5119 lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type) 5120 { 5121 bool emb = false; 5122 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size; 5123 uint16_t rsrc_id, rsrc_start, j, k; 5124 uint16_t *ids; 5125 int i, rc; 5126 unsigned long longs; 5127 unsigned long *bmask; 5128 struct lpfc_rsrc_blks *rsrc_blks; 5129 LPFC_MBOXQ_t *mbox; 5130 uint32_t length; 5131 struct lpfc_id_range *id_array = NULL; 5132 void *virtaddr = NULL; 5133 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc; 5134 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext; 5135 struct list_head *ext_blk_list; 5136 5137 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type, 5138 &rsrc_cnt, 5139 &rsrc_size); 5140 if (unlikely(rc)) 5141 return -EIO; 5142 5143 if ((rsrc_cnt == 0) || (rsrc_size == 0)) { 5144 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 5145 "3009 No available Resource Extents " 5146 "for resource type 0x%x: Count: 0x%x, " 5147 "Size 0x%x\n", type, rsrc_cnt, 5148 rsrc_size); 5149 return -ENOMEM; 5150 } 5151 5152 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT, 5153 "2903 Available Resource Extents " 5154 "for resource type 0x%x: Count: 0x%x, " 5155 "Size 0x%x\n", type, rsrc_cnt, 5156 rsrc_size); 5157 5158 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5159 if (!mbox) 5160 return -ENOMEM; 5161 5162 rc = lpfc_sli4_cfg_post_extnts(phba, &rsrc_cnt, type, &emb, mbox); 5163 if (unlikely(rc)) { 5164 rc = -EIO; 5165 goto err_exit; 5166 } 5167 5168 /* 5169 * Figure out where the response is located. Then get local pointers 5170 * to the response data. The port does not guarantee to respond to 5171 * all extents counts request so update the local variable with the 5172 * allocated count from the port. 
5173 */ 5174 if (emb == LPFC_SLI4_MBX_EMBED) { 5175 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents; 5176 id_array = &rsrc_ext->u.rsp.id[0]; 5177 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp); 5178 } else { 5179 virtaddr = mbox->sge_array->addr[0]; 5180 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr; 5181 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc); 5182 id_array = &n_rsrc->id; 5183 } 5184 5185 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG; 5186 rsrc_id_cnt = rsrc_cnt * rsrc_size; 5187 5188 /* 5189 * Based on the resource size and count, correct the base and max 5190 * resource values. 5191 */ 5192 length = sizeof(struct lpfc_rsrc_blks); 5193 switch (type) { 5194 case LPFC_RSC_TYPE_FCOE_RPI: 5195 phba->sli4_hba.rpi_bmask = kzalloc(longs * 5196 sizeof(unsigned long), 5197 GFP_KERNEL); 5198 if (unlikely(!phba->sli4_hba.rpi_bmask)) { 5199 rc = -ENOMEM; 5200 goto err_exit; 5201 } 5202 phba->sli4_hba.rpi_ids = kzalloc(rsrc_id_cnt * 5203 sizeof(uint16_t), 5204 GFP_KERNEL); 5205 if (unlikely(!phba->sli4_hba.rpi_ids)) { 5206 kfree(phba->sli4_hba.rpi_bmask); 5207 rc = -ENOMEM; 5208 goto err_exit; 5209 } 5210 5211 /* 5212 * The next_rpi was initialized with the maximum available 5213 * count but the port may allocate a smaller number. Catch 5214 * that case and update the next_rpi. 5215 */ 5216 phba->sli4_hba.next_rpi = rsrc_id_cnt; 5217 5218 /* Initialize local ptrs for common extent processing later. */ 5219 bmask = phba->sli4_hba.rpi_bmask; 5220 ids = phba->sli4_hba.rpi_ids; 5221 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list; 5222 break; 5223 case LPFC_RSC_TYPE_FCOE_VPI: 5224 phba->vpi_bmask = kzalloc(longs * 5225 sizeof(unsigned long), 5226 GFP_KERNEL); 5227 if (unlikely(!phba->vpi_bmask)) { 5228 rc = -ENOMEM; 5229 goto err_exit; 5230 } 5231 phba->vpi_ids = kzalloc(rsrc_id_cnt * 5232 sizeof(uint16_t), 5233 GFP_KERNEL); 5234 if (unlikely(!phba->vpi_ids)) { 5235 kfree(phba->vpi_bmask); 5236 rc = -ENOMEM; 5237 goto err_exit; 5238 } 5239 5240 /* Initialize local ptrs for common extent processing later. */ 5241 bmask = phba->vpi_bmask; 5242 ids = phba->vpi_ids; 5243 ext_blk_list = &phba->lpfc_vpi_blk_list; 5244 break; 5245 case LPFC_RSC_TYPE_FCOE_XRI: 5246 phba->sli4_hba.xri_bmask = kzalloc(longs * 5247 sizeof(unsigned long), 5248 GFP_KERNEL); 5249 if (unlikely(!phba->sli4_hba.xri_bmask)) { 5250 rc = -ENOMEM; 5251 goto err_exit; 5252 } 5253 phba->sli4_hba.xri_ids = kzalloc(rsrc_id_cnt * 5254 sizeof(uint16_t), 5255 GFP_KERNEL); 5256 if (unlikely(!phba->sli4_hba.xri_ids)) { 5257 kfree(phba->sli4_hba.xri_bmask); 5258 rc = -ENOMEM; 5259 goto err_exit; 5260 } 5261 5262 /* Initialize local ptrs for common extent processing later. */ 5263 bmask = phba->sli4_hba.xri_bmask; 5264 ids = phba->sli4_hba.xri_ids; 5265 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list; 5266 break; 5267 case LPFC_RSC_TYPE_FCOE_VFI: 5268 phba->sli4_hba.vfi_bmask = kzalloc(longs * 5269 sizeof(unsigned long), 5270 GFP_KERNEL); 5271 if (unlikely(!phba->sli4_hba.vfi_bmask)) { 5272 rc = -ENOMEM; 5273 goto err_exit; 5274 } 5275 phba->sli4_hba.vfi_ids = kzalloc(rsrc_id_cnt * 5276 sizeof(uint16_t), 5277 GFP_KERNEL); 5278 if (unlikely(!phba->sli4_hba.vfi_ids)) { 5279 kfree(phba->sli4_hba.vfi_bmask); 5280 rc = -ENOMEM; 5281 goto err_exit; 5282 } 5283 5284 /* Initialize local ptrs for common extent processing later. */ 5285 bmask = phba->sli4_hba.vfi_bmask; 5286 ids = phba->sli4_hba.vfi_ids; 5287 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list; 5288 break; 5289 default: 5290 /* Unsupported Opcode. 
Fail call. */ 5291 id_array = NULL; 5292 bmask = NULL; 5293 ids = NULL; 5294 ext_blk_list = NULL; 5295 goto err_exit; 5296 } 5297 5298 /* 5299 * Complete initializing the extent configuration with the 5300 * allocated ids assigned to this function. The bitmask serves 5301 * as an index into the array and manages the available ids. The 5302 * array just stores the ids communicated to the port via the wqes. 5303 */ 5304 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) { 5305 if ((i % 2) == 0) 5306 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0, 5307 &id_array[k]); 5308 else 5309 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1, 5310 &id_array[k]); 5311 5312 rsrc_blks = kzalloc(length, GFP_KERNEL); 5313 if (unlikely(!rsrc_blks)) { 5314 rc = -ENOMEM; 5315 kfree(bmask); 5316 kfree(ids); 5317 goto err_exit; 5318 } 5319 rsrc_blks->rsrc_start = rsrc_id; 5320 rsrc_blks->rsrc_size = rsrc_size; 5321 list_add_tail(&rsrc_blks->list, ext_blk_list); 5322 rsrc_start = rsrc_id; 5323 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) 5324 phba->sli4_hba.scsi_xri_start = rsrc_start + 5325 lpfc_sli4_get_els_iocb_cnt(phba); 5326 5327 while (rsrc_id < (rsrc_start + rsrc_size)) { 5328 ids[j] = rsrc_id; 5329 rsrc_id++; 5330 j++; 5331 } 5332 /* Entire word processed. Get next word.*/ 5333 if ((i % 2) == 1) 5334 k++; 5335 } 5336 err_exit: 5337 lpfc_sli4_mbox_cmd_free(phba, mbox); 5338 return rc; 5339 } 5340 5341 /** 5342 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent. 5343 * @phba: Pointer to HBA context object. 5344 * @type: the extent's type. 5345 * 5346 * This function deallocates all extents of a particular resource type. 5347 * SLI4 does not allow for deallocating a particular extent range. It 5348 * is the caller's responsibility to release all kernel memory resources. 5349 **/ 5350 static int 5351 lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type) 5352 { 5353 int rc; 5354 uint32_t length, mbox_tmo = 0; 5355 LPFC_MBOXQ_t *mbox; 5356 struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc; 5357 struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next; 5358 5359 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5360 if (!mbox) 5361 return -ENOMEM; 5362 5363 /* 5364 * This function sends an embedded mailbox because it only sends the 5365 * the resource type. All extents of this type are released by the 5366 * port. 5367 */ 5368 length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) - 5369 sizeof(struct lpfc_sli4_cfg_mhdr)); 5370 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 5371 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT, 5372 length, LPFC_SLI4_MBX_EMBED); 5373 5374 /* Send an extents count of 0 - the dealloc doesn't use it. */ 5375 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type, 5376 LPFC_SLI4_MBX_EMBED); 5377 if (unlikely(rc)) { 5378 rc = -EIO; 5379 goto out_free_mbox; 5380 } 5381 if (!phba->sli4_hba.intr_enable) 5382 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 5383 else { 5384 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 5385 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 5386 } 5387 if (unlikely(rc)) { 5388 rc = -EIO; 5389 goto out_free_mbox; 5390 } 5391 5392 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents; 5393 if (bf_get(lpfc_mbox_hdr_status, 5394 &dealloc_rsrc->header.cfg_shdr.response)) { 5395 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 5396 "2919 Failed to release resource extents " 5397 "for type %d - Status 0x%x Add'l Status 0x%x. 
" 5398 "Resource memory not released.\n", 5399 type, 5400 bf_get(lpfc_mbox_hdr_status, 5401 &dealloc_rsrc->header.cfg_shdr.response), 5402 bf_get(lpfc_mbox_hdr_add_status, 5403 &dealloc_rsrc->header.cfg_shdr.response)); 5404 rc = -EIO; 5405 goto out_free_mbox; 5406 } 5407 5408 /* Release kernel memory resources for the specific type. */ 5409 switch (type) { 5410 case LPFC_RSC_TYPE_FCOE_VPI: 5411 kfree(phba->vpi_bmask); 5412 kfree(phba->vpi_ids); 5413 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 5414 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 5415 &phba->lpfc_vpi_blk_list, list) { 5416 list_del_init(&rsrc_blk->list); 5417 kfree(rsrc_blk); 5418 } 5419 break; 5420 case LPFC_RSC_TYPE_FCOE_XRI: 5421 kfree(phba->sli4_hba.xri_bmask); 5422 kfree(phba->sli4_hba.xri_ids); 5423 bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 5424 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 5425 &phba->sli4_hba.lpfc_xri_blk_list, list) { 5426 list_del_init(&rsrc_blk->list); 5427 kfree(rsrc_blk); 5428 } 5429 break; 5430 case LPFC_RSC_TYPE_FCOE_VFI: 5431 kfree(phba->sli4_hba.vfi_bmask); 5432 kfree(phba->sli4_hba.vfi_ids); 5433 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 5434 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 5435 &phba->sli4_hba.lpfc_vfi_blk_list, list) { 5436 list_del_init(&rsrc_blk->list); 5437 kfree(rsrc_blk); 5438 } 5439 break; 5440 case LPFC_RSC_TYPE_FCOE_RPI: 5441 /* RPI bitmask and physical id array are cleaned up earlier. */ 5442 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 5443 &phba->sli4_hba.lpfc_rpi_blk_list, list) { 5444 list_del_init(&rsrc_blk->list); 5445 kfree(rsrc_blk); 5446 } 5447 break; 5448 default: 5449 break; 5450 } 5451 5452 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 5453 5454 out_free_mbox: 5455 mempool_free(mbox, phba->mbox_mem_pool); 5456 return rc; 5457 } 5458 5459 /** 5460 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents. 5461 * @phba: Pointer to HBA context object. 5462 * 5463 * This function allocates all SLI4 resource identifiers. 5464 **/ 5465 int 5466 lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba) 5467 { 5468 int i, rc, error = 0; 5469 uint16_t count, base; 5470 unsigned long longs; 5471 5472 if (!phba->sli4_hba.rpi_hdrs_in_use) 5473 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi; 5474 if (phba->sli4_hba.extents_in_use) { 5475 /* 5476 * The port supports resource extents. The XRI, VPI, VFI, RPI 5477 * resource extent count must be read and allocated before 5478 * provisioning the resource id arrays. 5479 */ 5480 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) == 5481 LPFC_IDX_RSRC_RDY) { 5482 /* 5483 * Extent-based resources are set - the driver could 5484 * be in a port reset. Figure out if any corrective 5485 * actions need to be taken. 5486 */ 5487 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 5488 LPFC_RSC_TYPE_FCOE_VFI); 5489 if (rc != 0) 5490 error++; 5491 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 5492 LPFC_RSC_TYPE_FCOE_VPI); 5493 if (rc != 0) 5494 error++; 5495 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 5496 LPFC_RSC_TYPE_FCOE_XRI); 5497 if (rc != 0) 5498 error++; 5499 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 5500 LPFC_RSC_TYPE_FCOE_RPI); 5501 if (rc != 0) 5502 error++; 5503 5504 /* 5505 * It's possible that the number of resources 5506 * provided to this port instance changed between 5507 * resets. Detect this condition and reallocate 5508 * resources. Otherwise, there is no action. 
5509 */ 5510 if (error) { 5511 lpfc_printf_log(phba, KERN_INFO, 5512 LOG_MBOX | LOG_INIT, 5513 "2931 Detected extent resource " 5514 "change. Reallocating all " 5515 "extents.\n"); 5516 rc = lpfc_sli4_dealloc_extent(phba, 5517 LPFC_RSC_TYPE_FCOE_VFI); 5518 rc = lpfc_sli4_dealloc_extent(phba, 5519 LPFC_RSC_TYPE_FCOE_VPI); 5520 rc = lpfc_sli4_dealloc_extent(phba, 5521 LPFC_RSC_TYPE_FCOE_XRI); 5522 rc = lpfc_sli4_dealloc_extent(phba, 5523 LPFC_RSC_TYPE_FCOE_RPI); 5524 } else 5525 return 0; 5526 } 5527 5528 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI); 5529 if (unlikely(rc)) 5530 goto err_exit; 5531 5532 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI); 5533 if (unlikely(rc)) 5534 goto err_exit; 5535 5536 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI); 5537 if (unlikely(rc)) 5538 goto err_exit; 5539 5540 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI); 5541 if (unlikely(rc)) 5542 goto err_exit; 5543 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 5544 LPFC_IDX_RSRC_RDY); 5545 return rc; 5546 } else { 5547 /* 5548 * The port does not support resource extents. The XRI, VPI, 5549 * VFI, RPI resource ids were determined from READ_CONFIG. 5550 * Just allocate the bitmasks and provision the resource id 5551 * arrays. If a port reset is active, the resources don't 5552 * need any action - just exit. 5553 */ 5554 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) == 5555 LPFC_IDX_RSRC_RDY) { 5556 lpfc_sli4_dealloc_resource_identifiers(phba); 5557 lpfc_sli4_remove_rpis(phba); 5558 } 5559 /* RPIs. */ 5560 count = phba->sli4_hba.max_cfg_param.max_rpi; 5561 base = phba->sli4_hba.max_cfg_param.rpi_base; 5562 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 5563 phba->sli4_hba.rpi_bmask = kzalloc(longs * 5564 sizeof(unsigned long), 5565 GFP_KERNEL); 5566 if (unlikely(!phba->sli4_hba.rpi_bmask)) { 5567 rc = -ENOMEM; 5568 goto err_exit; 5569 } 5570 phba->sli4_hba.rpi_ids = kzalloc(count * 5571 sizeof(uint16_t), 5572 GFP_KERNEL); 5573 if (unlikely(!phba->sli4_hba.rpi_ids)) { 5574 rc = -ENOMEM; 5575 goto free_rpi_bmask; 5576 } 5577 5578 for (i = 0; i < count; i++) 5579 phba->sli4_hba.rpi_ids[i] = base + i; 5580 5581 /* VPIs. */ 5582 count = phba->sli4_hba.max_cfg_param.max_vpi; 5583 base = phba->sli4_hba.max_cfg_param.vpi_base; 5584 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 5585 phba->vpi_bmask = kzalloc(longs * 5586 sizeof(unsigned long), 5587 GFP_KERNEL); 5588 if (unlikely(!phba->vpi_bmask)) { 5589 rc = -ENOMEM; 5590 goto free_rpi_ids; 5591 } 5592 phba->vpi_ids = kzalloc(count * 5593 sizeof(uint16_t), 5594 GFP_KERNEL); 5595 if (unlikely(!phba->vpi_ids)) { 5596 rc = -ENOMEM; 5597 goto free_vpi_bmask; 5598 } 5599 5600 for (i = 0; i < count; i++) 5601 phba->vpi_ids[i] = base + i; 5602 5603 /* XRIs. */ 5604 count = phba->sli4_hba.max_cfg_param.max_xri; 5605 base = phba->sli4_hba.max_cfg_param.xri_base; 5606 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 5607 phba->sli4_hba.xri_bmask = kzalloc(longs * 5608 sizeof(unsigned long), 5609 GFP_KERNEL); 5610 if (unlikely(!phba->sli4_hba.xri_bmask)) { 5611 rc = -ENOMEM; 5612 goto free_vpi_ids; 5613 } 5614 phba->sli4_hba.max_cfg_param.xri_used = 0; 5615 phba->sli4_hba.xri_count = 0; 5616 phba->sli4_hba.xri_ids = kzalloc(count * 5617 sizeof(uint16_t), 5618 GFP_KERNEL); 5619 if (unlikely(!phba->sli4_hba.xri_ids)) { 5620 rc = -ENOMEM; 5621 goto free_xri_bmask; 5622 } 5623 5624 for (i = 0; i < count; i++) 5625 phba->sli4_hba.xri_ids[i] = base + i; 5626 5627 /* VFIs. 
*/ 5628 count = phba->sli4_hba.max_cfg_param.max_vfi; 5629 base = phba->sli4_hba.max_cfg_param.vfi_base; 5630 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 5631 phba->sli4_hba.vfi_bmask = kzalloc(longs * 5632 sizeof(unsigned long), 5633 GFP_KERNEL); 5634 if (unlikely(!phba->sli4_hba.vfi_bmask)) { 5635 rc = -ENOMEM; 5636 goto free_xri_ids; 5637 } 5638 phba->sli4_hba.vfi_ids = kzalloc(count * 5639 sizeof(uint16_t), 5640 GFP_KERNEL); 5641 if (unlikely(!phba->sli4_hba.vfi_ids)) { 5642 rc = -ENOMEM; 5643 goto free_vfi_bmask; 5644 } 5645 5646 for (i = 0; i < count; i++) 5647 phba->sli4_hba.vfi_ids[i] = base + i; 5648 5649 /* 5650 * Mark all resources ready. An HBA reset doesn't need 5651 * to reset the initialization. 5652 */ 5653 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 5654 LPFC_IDX_RSRC_RDY); 5655 return 0; 5656 } 5657 5658 free_vfi_bmask: 5659 kfree(phba->sli4_hba.vfi_bmask); 5660 free_xri_ids: 5661 kfree(phba->sli4_hba.xri_ids); 5662 free_xri_bmask: 5663 kfree(phba->sli4_hba.xri_bmask); 5664 free_vpi_ids: 5665 kfree(phba->vpi_ids); 5666 free_vpi_bmask: 5667 kfree(phba->vpi_bmask); 5668 free_rpi_ids: 5669 kfree(phba->sli4_hba.rpi_ids); 5670 free_rpi_bmask: 5671 kfree(phba->sli4_hba.rpi_bmask); 5672 err_exit: 5673 return rc; 5674 } 5675 5676 /** 5677 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource identifiers. 5678 * @phba: Pointer to HBA context object. 5679 * 5680 * This function releases all of the resource identifiers held by the port, 5681 * together with the bitmasks and id arrays used to track them. 5682 **/ 5683 int 5684 lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba) 5685 { 5686 if (phba->sli4_hba.extents_in_use) { 5687 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI); 5688 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI); 5689 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI); 5690 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI); 5691 } else { 5692 kfree(phba->vpi_bmask); 5693 kfree(phba->vpi_ids); 5694 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 5695 kfree(phba->sli4_hba.xri_bmask); 5696 kfree(phba->sli4_hba.xri_ids); 5697 bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 5698 kfree(phba->sli4_hba.vfi_bmask); 5699 kfree(phba->sli4_hba.vfi_ids); 5700 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 5701 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 5702 } 5703 5704 return 0; 5705 } 5706 5707 /** 5708 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents. 5709 * @phba: Pointer to HBA context object. 5710 * @type: The resource extent type. 5711 * @extnt_cnt: buffer to hold port extent count response. 5712 * @extnt_size: buffer to hold port extent size response. 5713 * 5714 * This function calls the port to read the host allocated extents 5715 * for a particular type.
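 *
 * For illustration only (a hypothetical caller, not code taken from this
 * file), the currently allocated XRI extents could be read back roughly
 * like this:
 *
 *	uint16_t xri_cnt = 0, xri_size = 0;
 *
 *	if (!lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_XRI,
 *					    &xri_cnt, &xri_size))
 *		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
 *				"xri extents: %d blocks of size %d\n",
 *				xri_cnt, xri_size);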
5716 **/ 5717 int 5718 lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type, 5719 uint16_t *extnt_cnt, uint16_t *extnt_size) 5720 { 5721 bool emb; 5722 int rc = 0; 5723 uint16_t curr_blks = 0; 5724 uint32_t req_len, emb_len; 5725 uint32_t alloc_len, mbox_tmo; 5726 struct list_head *blk_list_head; 5727 struct lpfc_rsrc_blks *rsrc_blk; 5728 LPFC_MBOXQ_t *mbox; 5729 void *virtaddr = NULL; 5730 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc; 5731 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext; 5732 union lpfc_sli4_cfg_shdr *shdr; 5733 5734 switch (type) { 5735 case LPFC_RSC_TYPE_FCOE_VPI: 5736 blk_list_head = &phba->lpfc_vpi_blk_list; 5737 break; 5738 case LPFC_RSC_TYPE_FCOE_XRI: 5739 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list; 5740 break; 5741 case LPFC_RSC_TYPE_FCOE_VFI: 5742 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list; 5743 break; 5744 case LPFC_RSC_TYPE_FCOE_RPI: 5745 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list; 5746 break; 5747 default: 5748 return -EIO; 5749 } 5750 5751 /* Count the number of extents currently allocatd for this type. */ 5752 list_for_each_entry(rsrc_blk, blk_list_head, list) { 5753 if (curr_blks == 0) { 5754 /* 5755 * The GET_ALLOCATED mailbox does not return the size, 5756 * just the count. The size should be just the size 5757 * stored in the current allocated block and all sizes 5758 * for an extent type are the same so set the return 5759 * value now. 5760 */ 5761 *extnt_size = rsrc_blk->rsrc_size; 5762 } 5763 curr_blks++; 5764 } 5765 5766 /* Calculate the total requested length of the dma memory. */ 5767 req_len = curr_blks * sizeof(uint16_t); 5768 5769 /* 5770 * Calculate the size of an embedded mailbox. The uint32_t 5771 * accounts for extents-specific word. 5772 */ 5773 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) - 5774 sizeof(uint32_t); 5775 5776 /* 5777 * Presume the allocation and response will fit into an embedded 5778 * mailbox. If not true, reconfigure to a non-embedded mailbox. 5779 */ 5780 emb = LPFC_SLI4_MBX_EMBED; 5781 req_len = emb_len; 5782 if (req_len > emb_len) { 5783 req_len = curr_blks * sizeof(uint16_t) + 5784 sizeof(union lpfc_sli4_cfg_shdr) + 5785 sizeof(uint32_t); 5786 emb = LPFC_SLI4_MBX_NEMBED; 5787 } 5788 5789 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5790 if (!mbox) 5791 return -ENOMEM; 5792 memset(mbox, 0, sizeof(LPFC_MBOXQ_t)); 5793 5794 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 5795 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT, 5796 req_len, emb); 5797 if (alloc_len < req_len) { 5798 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5799 "2983 Allocated DMA memory size (x%x) is " 5800 "less than the requested DMA memory " 5801 "size (x%x)\n", alloc_len, req_len); 5802 rc = -ENOMEM; 5803 goto err_exit; 5804 } 5805 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb); 5806 if (unlikely(rc)) { 5807 rc = -EIO; 5808 goto err_exit; 5809 } 5810 5811 if (!phba->sli4_hba.intr_enable) 5812 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 5813 else { 5814 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 5815 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 5816 } 5817 5818 if (unlikely(rc)) { 5819 rc = -EIO; 5820 goto err_exit; 5821 } 5822 5823 /* 5824 * Figure out where the response is located. Then get local pointers 5825 * to the response data. The port does not guarantee to respond to 5826 * all extents counts request so update the local variable with the 5827 * allocated count from the port. 
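 * An embedded response lives in the mailbox itself
 * (mbox->u.mqe.un.alloc_rsrc_extents); a non-embedded response is read
 * from the first SGE of the mailbox's sge_array.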
5828 */ 5829 if (emb == LPFC_SLI4_MBX_EMBED) { 5830 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents; 5831 shdr = &rsrc_ext->header.cfg_shdr; 5832 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp); 5833 } else { 5834 virtaddr = mbox->sge_array->addr[0]; 5835 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr; 5836 shdr = &n_rsrc->cfg_shdr; 5837 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc); 5838 } 5839 5840 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) { 5841 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 5842 "2984 Failed to read allocated resources " 5843 "for type %d - Status 0x%x Add'l Status 0x%x.\n", 5844 type, 5845 bf_get(lpfc_mbox_hdr_status, &shdr->response), 5846 bf_get(lpfc_mbox_hdr_add_status, &shdr->response)); 5847 rc = -EIO; 5848 goto err_exit; 5849 } 5850 err_exit: 5851 lpfc_sli4_mbox_cmd_free(phba, mbox); 5852 return rc; 5853 } 5854 5855 /** 5856 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function 5857 * @phba: Pointer to HBA context object. 5858 * 5859 * This function is the main SLI4 device initialization PCI function. This 5860 * function is called by the HBA initialization code, HBA reset code and 5861 * HBA error attention handler code. Caller is not required to hold any 5862 * locks. 5863 **/ 5864 int 5865 lpfc_sli4_hba_setup(struct lpfc_hba *phba) 5866 { 5867 int rc; 5868 LPFC_MBOXQ_t *mboxq; 5869 struct lpfc_mqe *mqe; 5870 uint8_t *vpd; 5871 uint32_t vpd_size; 5872 uint32_t ftr_rsp = 0; 5873 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport); 5874 struct lpfc_vport *vport = phba->pport; 5875 struct lpfc_dmabuf *mp; 5876 5877 /* Perform a PCI function reset to start from clean */ 5878 rc = lpfc_pci_function_reset(phba); 5879 if (unlikely(rc)) 5880 return -ENODEV; 5881 5882 /* Check the HBA Host Status Register for readiness */ 5883 rc = lpfc_sli4_post_status_check(phba); 5884 if (unlikely(rc)) 5885 return -ENODEV; 5886 else { 5887 spin_lock_irq(&phba->hbalock); 5888 phba->sli.sli_flag |= LPFC_SLI_ACTIVE; 5889 spin_unlock_irq(&phba->hbalock); 5890 } 5891 5892 /* 5893 * Allocate a single mailbox container for initializing the 5894 * port. 5895 */ 5896 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5897 if (!mboxq) 5898 return -ENOMEM; 5899 5900 /* Issue READ_REV to collect vpd and FW information. */ 5901 vpd_size = SLI4_PAGE_SIZE; 5902 vpd = kzalloc(vpd_size, GFP_KERNEL); 5903 if (!vpd) { 5904 rc = -ENOMEM; 5905 goto out_free_mbox; 5906 } 5907 5908 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size); 5909 if (unlikely(rc)) { 5910 kfree(vpd); 5911 goto out_free_mbox; 5912 } 5913 mqe = &mboxq->u.mqe; 5914 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev); 5915 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) 5916 phba->hba_flag |= HBA_FCOE_MODE; 5917 else 5918 phba->hba_flag &= ~HBA_FCOE_MODE; 5919 5920 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) == 5921 LPFC_DCBX_CEE_MODE) 5922 phba->hba_flag |= HBA_FIP_SUPPORT; 5923 else 5924 phba->hba_flag &= ~HBA_FIP_SUPPORT; 5925 5926 if (phba->sli_rev != LPFC_SLI_REV4) { 5927 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 5928 "0376 READ_REV Error.
SLI Level %d " 5929 "FCoE enabled %d\n", 5930 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE); 5931 rc = -EIO; 5932 kfree(vpd); 5933 goto out_free_mbox; 5934 } 5935 5936 /* 5937 * Continue initialization with default values even if driver failed 5938 * to read FCoE param config regions, only read parameters if the 5939 * board is FCoE 5940 */ 5941 if (phba->hba_flag & HBA_FCOE_MODE && 5942 lpfc_sli4_read_fcoe_params(phba)) 5943 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT, 5944 "2570 Failed to read FCoE parameters\n"); 5945 5946 /* 5947 * Retrieve sli4 device physical port name, failure of doing it 5948 * is considered as non-fatal. 5949 */ 5950 rc = lpfc_sli4_retrieve_pport_name(phba); 5951 if (!rc) 5952 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 5953 "3080 Successful retrieving SLI4 device " 5954 "physical port name: %s.\n", phba->Port); 5955 5956 /* 5957 * Evaluate the read rev and vpd data. Populate the driver 5958 * state with the results. If this routine fails, the failure 5959 * is not fatal as the driver will use generic values. 5960 */ 5961 rc = lpfc_parse_vpd(phba, vpd, vpd_size); 5962 if (unlikely(!rc)) { 5963 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 5964 "0377 Error %d parsing vpd. " 5965 "Using defaults.\n", rc); 5966 rc = 0; 5967 } 5968 kfree(vpd); 5969 5970 /* Save information as VPD data */ 5971 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev; 5972 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev; 5973 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev; 5974 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high, 5975 &mqe->un.read_rev); 5976 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low, 5977 &mqe->un.read_rev); 5978 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high, 5979 &mqe->un.read_rev); 5980 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low, 5981 &mqe->un.read_rev); 5982 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev; 5983 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16); 5984 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev; 5985 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16); 5986 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev; 5987 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16); 5988 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 5989 "(%d):0380 READ_REV Status x%x " 5990 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n", 5991 mboxq->vport ? mboxq->vport->vpi : 0, 5992 bf_get(lpfc_mqe_status, mqe), 5993 phba->vpd.rev.opFwName, 5994 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow, 5995 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow); 5996 5997 /* 5998 * Discover the port's supported feature set and match it against the 5999 * hosts requests. 6000 */ 6001 lpfc_request_features(phba, mboxq); 6002 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6003 if (unlikely(rc)) { 6004 rc = -EIO; 6005 goto out_free_mbox; 6006 } 6007 6008 /* 6009 * The port must support FCP initiator mode as this is the 6010 * only mode running in the host. 
6011 */ 6012 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) { 6013 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 6014 "0378 No support for fcpi mode.\n"); 6015 ftr_rsp++; 6016 } 6017 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs)) 6018 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED; 6019 else 6020 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED; 6021 /* 6022 * If the port cannot support the host's requested features 6023 * then turn off the global config parameters to disable the 6024 * feature in the driver. This is not a fatal error. 6025 */ 6026 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED; 6027 if (phba->cfg_enable_bg) { 6028 if (bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)) 6029 phba->sli3_options |= LPFC_SLI3_BG_ENABLED; 6030 else 6031 ftr_rsp++; 6032 } 6033 6034 if (phba->max_vpi && phba->cfg_enable_npiv && 6035 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) 6036 ftr_rsp++; 6037 6038 if (ftr_rsp) { 6039 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 6040 "0379 Feature Mismatch Data: x%08x %08x " 6041 "x%x x%x x%x\n", mqe->un.req_ftrs.word2, 6042 mqe->un.req_ftrs.word3, phba->cfg_enable_bg, 6043 phba->cfg_enable_npiv, phba->max_vpi); 6044 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) 6045 phba->cfg_enable_bg = 0; 6046 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) 6047 phba->cfg_enable_npiv = 0; 6048 } 6049 6050 /* These SLI3 features are assumed in SLI4 */ 6051 spin_lock_irq(&phba->hbalock); 6052 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED); 6053 spin_unlock_irq(&phba->hbalock); 6054 6055 /* 6056 * Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent 6057 * calls depends on these resources to complete port setup. 6058 */ 6059 rc = lpfc_sli4_alloc_resource_identifiers(phba); 6060 if (rc) { 6061 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6062 "2920 Failed to alloc Resource IDs " 6063 "rc = x%x\n", rc); 6064 goto out_free_mbox; 6065 } 6066 /* update physical xri mappings in the scsi buffers */ 6067 lpfc_scsi_buf_update(phba); 6068 6069 /* Read the port's service parameters. */ 6070 rc = lpfc_read_sparam(phba, mboxq, vport->vpi); 6071 if (rc) { 6072 phba->link_state = LPFC_HBA_ERROR; 6073 rc = -ENOMEM; 6074 goto out_free_mbox; 6075 } 6076 6077 mboxq->vport = vport; 6078 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6079 mp = (struct lpfc_dmabuf *) mboxq->context1; 6080 if (rc == MBX_SUCCESS) { 6081 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm)); 6082 rc = 0; 6083 } 6084 6085 /* 6086 * This memory was allocated by the lpfc_read_sparam routine. Release 6087 * it to the mbuf pool. 6088 */ 6089 lpfc_mbuf_free(phba, mp->virt, mp->phys); 6090 kfree(mp); 6091 mboxq->context1 = NULL; 6092 if (unlikely(rc)) { 6093 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6094 "0382 READ_SPARAM command failed " 6095 "status %d, mbxStatus x%x\n", 6096 rc, bf_get(lpfc_mqe_status, mqe)); 6097 phba->link_state = LPFC_HBA_ERROR; 6098 rc = -EIO; 6099 goto out_free_mbox; 6100 } 6101 6102 lpfc_update_vport_wwn(vport); 6103 6104 /* Update the fc_host data structures with new wwn. 
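 * The node and port names copied below are what the FC transport class
 * reports for this Scsi_Host.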
*/ 6105 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 6106 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 6107 6108 /* Register SGL pool to the device using non-embedded mailbox command */ 6109 if (!phba->sli4_hba.extents_in_use) { 6110 rc = lpfc_sli4_post_els_sgl_list(phba); 6111 if (unlikely(rc)) { 6112 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6113 "0582 Error %d during els sgl post " 6114 "operation\n", rc); 6115 rc = -ENODEV; 6116 goto out_free_mbox; 6117 } 6118 } else { 6119 rc = lpfc_sli4_post_els_sgl_list_ext(phba); 6120 if (unlikely(rc)) { 6121 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6122 "2560 Error %d during els sgl post " 6123 "operation\n", rc); 6124 rc = -ENODEV; 6125 goto out_free_mbox; 6126 } 6127 } 6128 6129 /* Register SCSI SGL pool to the device */ 6130 rc = lpfc_sli4_repost_scsi_sgl_list(phba); 6131 if (unlikely(rc)) { 6132 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6133 "0383 Error %d during scsi sgl post " 6134 "operation\n", rc); 6135 /* Some Scsi buffers were moved to the abort scsi list */ 6136 /* A pci function reset will repost them */ 6137 rc = -ENODEV; 6138 goto out_free_mbox; 6139 } 6140 6141 /* Post the rpi header region to the device. */ 6142 rc = lpfc_sli4_post_all_rpi_hdrs(phba); 6143 if (unlikely(rc)) { 6144 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6145 "0393 Error %d during rpi post operation\n", 6146 rc); 6147 rc = -ENODEV; 6148 goto out_free_mbox; 6149 } 6150 lpfc_sli4_node_prep(phba); 6151 6152 /* Create all the SLI4 queues */ 6153 rc = lpfc_sli4_queue_create(phba); 6154 if (rc) { 6155 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6156 "3089 Failed to allocate queues\n"); 6157 rc = -ENODEV; 6158 goto out_stop_timers; 6159 } 6160 /* Set up all the queues to the device */ 6161 rc = lpfc_sli4_queue_setup(phba); 6162 if (unlikely(rc)) { 6163 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6164 "0381 Error %d during queue setup.\n ", rc); 6165 goto out_destroy_queue; 6166 } 6167 6168 /* Arm the CQs and then EQs on device */ 6169 lpfc_sli4_arm_cqeq_intr(phba); 6170 6171 /* Indicate device interrupt mode */ 6172 phba->sli4_hba.intr_enable = 1; 6173 6174 /* Allow asynchronous mailbox command to go through */ 6175 spin_lock_irq(&phba->hbalock); 6176 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 6177 spin_unlock_irq(&phba->hbalock); 6178 6179 /* Post receive buffers to the device */ 6180 lpfc_sli4_rb_setup(phba); 6181 6182 /* Reset HBA FCF states after HBA reset */ 6183 phba->fcf.fcf_flag = 0; 6184 phba->fcf.current_rec.flag = 0; 6185 6186 /* Start the ELS watchdog timer */ 6187 mod_timer(&vport->els_tmofunc, 6188 jiffies + HZ * (phba->fc_ratov * 2)); 6189 6190 /* Start heart beat timer */ 6191 mod_timer(&phba->hb_tmofunc, 6192 jiffies + HZ * LPFC_HB_MBOX_INTERVAL); 6193 phba->hb_outstanding = 0; 6194 phba->last_completion_time = jiffies; 6195 6196 /* Start error attention (ERATT) polling timer */ 6197 mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL); 6198 6199 /* Enable PCIe device Advanced Error Reporting (AER) if configured */ 6200 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) { 6201 rc = pci_enable_pcie_error_reporting(phba->pcidev); 6202 if (!rc) { 6203 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6204 "2829 This device supports " 6205 "Advanced Error Reporting (AER)\n"); 6206 spin_lock_irq(&phba->hbalock); 6207 phba->hba_flag |= HBA_AER_ENABLED; 6208 spin_unlock_irq(&phba->hbalock); 6209 } else { 6210 lpfc_printf_log(phba, KERN_INFO, 
LOG_INIT, 6211 "2830 This device does not support " 6212 "Advanced Error Reporting (AER)\n"); 6213 phba->cfg_aer_support = 0; 6214 } 6215 rc = 0; 6216 } 6217 6218 if (!(phba->hba_flag & HBA_FCOE_MODE)) { 6219 /* 6220 * The FC Port needs to register FCFI (index 0) 6221 */ 6222 lpfc_reg_fcfi(phba, mboxq); 6223 mboxq->vport = phba->pport; 6224 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6225 if (rc != MBX_SUCCESS) 6226 goto out_unset_queue; 6227 rc = 0; 6228 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, 6229 &mboxq->u.mqe.un.reg_fcfi); 6230 6231 /* Check if the port is configured to be disabled */ 6232 lpfc_sli_read_link_ste(phba); 6233 } 6234 6235 /* 6236 * The port is ready, set the host's link state to LINK_DOWN 6237 * in preparation for link interrupts. 6238 */ 6239 spin_lock_irq(&phba->hbalock); 6240 phba->link_state = LPFC_LINK_DOWN; 6241 spin_unlock_irq(&phba->hbalock); 6242 if (!(phba->hba_flag & HBA_FCOE_MODE) && 6243 (phba->hba_flag & LINK_DISABLED)) { 6244 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI, 6245 "3103 Adapter Link is disabled.\n"); 6246 lpfc_down_link(phba, mboxq); 6247 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6248 if (rc != MBX_SUCCESS) { 6249 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI, 6250 "3104 Adapter failed to issue " 6251 "DOWN_LINK mbox cmd, rc:x%x\n", rc); 6252 goto out_unset_queue; 6253 } 6254 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) { 6255 /* don't perform init_link on SLI4 FC port loopback test */ 6256 if (!(phba->link_flag & LS_LOOPBACK_MODE)) { 6257 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); 6258 if (rc) 6259 goto out_unset_queue; 6260 } 6261 } 6262 mempool_free(mboxq, phba->mbox_mem_pool); 6263 return rc; 6264 out_unset_queue: 6265 /* Unset all the queues set up in this routine when error out */ 6266 lpfc_sli4_queue_unset(phba); 6267 out_destroy_queue: 6268 lpfc_sli4_queue_destroy(phba); 6269 out_stop_timers: 6270 lpfc_stop_hba_timers(phba); 6271 out_free_mbox: 6272 mempool_free(mboxq, phba->mbox_mem_pool); 6273 return rc; 6274 } 6275 6276 /** 6277 * lpfc_mbox_timeout - Timeout call back function for mbox timer 6278 * @ptr: context object - pointer to hba structure. 6279 * 6280 * This is the callback function for mailbox timer. The mailbox 6281 * timer is armed when a new mailbox command is issued and the timer 6282 * is deleted when the mailbox complete. The function is called by 6283 * the kernel timer code when a mailbox does not complete within 6284 * expected time. This function wakes up the worker thread to 6285 * process the mailbox timeout and returns. All the processing is 6286 * done by the worker thread function lpfc_mbox_timeout_handler. 6287 **/ 6288 void 6289 lpfc_mbox_timeout(unsigned long ptr) 6290 { 6291 struct lpfc_hba *phba = (struct lpfc_hba *) ptr; 6292 unsigned long iflag; 6293 uint32_t tmo_posted; 6294 6295 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 6296 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO; 6297 if (!tmo_posted) 6298 phba->pport->work_port_events |= WORKER_MBOX_TMO; 6299 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 6300 6301 if (!tmo_posted) 6302 lpfc_worker_wake_up(phba); 6303 return; 6304 } 6305 6306 6307 /** 6308 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout 6309 * @phba: Pointer to HBA context object. 6310 * 6311 * This function is called from worker thread when a mailbox command times out. 6312 * The caller is not required to hold any locks. 
This function will reset the 6313 * HBA and recover all the pending commands. 6314 **/ 6315 void 6316 lpfc_mbox_timeout_handler(struct lpfc_hba *phba) 6317 { 6318 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active; 6319 MAILBOX_t *mb = &pmbox->u.mb; 6320 struct lpfc_sli *psli = &phba->sli; 6321 struct lpfc_sli_ring *pring; 6322 6323 /* Check the pmbox pointer first. There is a race condition 6324 * between the mbox timeout handler getting executed in the 6325 * worklist and the mailbox actually completing. When this 6326 * race condition occurs, the mbox_active will be NULL. 6327 */ 6328 spin_lock_irq(&phba->hbalock); 6329 if (pmbox == NULL) { 6330 lpfc_printf_log(phba, KERN_WARNING, 6331 LOG_MBOX | LOG_SLI, 6332 "0353 Active Mailbox cleared - mailbox timeout " 6333 "exiting\n"); 6334 spin_unlock_irq(&phba->hbalock); 6335 return; 6336 } 6337 6338 /* Mbox cmd <mbxCommand> timeout */ 6339 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6340 "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n", 6341 mb->mbxCommand, 6342 phba->pport->port_state, 6343 phba->sli.sli_flag, 6344 phba->sli.mbox_active); 6345 spin_unlock_irq(&phba->hbalock); 6346 6347 /* Setting state unknown so lpfc_sli_abort_iocb_ring 6348 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing 6349 * it to fail all outstanding SCSI IO. 6350 */ 6351 spin_lock_irq(&phba->pport->work_port_lock); 6352 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 6353 spin_unlock_irq(&phba->pport->work_port_lock); 6354 spin_lock_irq(&phba->hbalock); 6355 phba->link_state = LPFC_LINK_UNKNOWN; 6356 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 6357 spin_unlock_irq(&phba->hbalock); 6358 6359 pring = &psli->ring[psli->fcp_ring]; 6360 lpfc_sli_abort_iocb_ring(phba, pring); 6361 6362 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6363 "0345 Resetting board due to mailbox timeout\n"); 6364 6365 /* Reset the HBA device */ 6366 lpfc_reset_hba(phba); 6367 } 6368 6369 /** 6370 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware 6371 * @phba: Pointer to HBA context object. 6372 * @pmbox: Pointer to mailbox object. 6373 * @flag: Flag indicating how the mailbox need to be processed. 6374 * 6375 * This function is called by discovery code and HBA management code 6376 * to submit a mailbox command to firmware with SLI-3 interface spec. This 6377 * function gets the hbalock to protect the data structures. 6378 * The mailbox command can be submitted in polling mode, in which case 6379 * this function will wait in a polling loop for the completion of the 6380 * mailbox. 6381 * If the mailbox is submitted in no_wait mode (not polling) the 6382 * function will submit the command and returns immediately without waiting 6383 * for the mailbox completion. The no_wait is supported only when HBA 6384 * is in SLI2/SLI3 mode - interrupts are enabled. 6385 * The SLI interface allows only one mailbox pending at a time. If the 6386 * mailbox is issued in polling mode and there is already a mailbox 6387 * pending, then the function will return an error. If the mailbox is issued 6388 * in NO_WAIT mode and there is a mailbox pending already, the function 6389 * will return MBX_BUSY after queuing the mailbox into mailbox queue. 6390 * The sli layer owns the mailbox object until the completion of mailbox 6391 * command if this function return MBX_BUSY or MBX_SUCCESS. For all other 6392 * return codes the caller owns the mailbox command after the return of 6393 * the function. 
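 *
 * As a minimal sketch only (a hypothetical caller, not code from this file;
 * real callers go through the lpfc_sli_issue_mbox wrapper and the command
 * builders in lpfc_mbox.c, and pmb, rc and perfh are assumed locals), a
 * polling-mode submission looks roughly like:
 *
 *	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	if (!pmb)
 *		return -ENOMEM;
 *	lpfc_request_features(phba, pmb);
 *	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
 *	if (rc == MBX_SUCCESS)
 *		perfh = bf_get(lpfc_mbx_rq_ftr_rsp_perfh,
 *			       &pmb->u.mqe.un.req_ftrs);
 *	mempool_free(pmb, phba->mbox_mem_pool);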
6394 **/ 6395 static int 6396 lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, 6397 uint32_t flag) 6398 { 6399 MAILBOX_t *mb; 6400 struct lpfc_sli *psli = &phba->sli; 6401 uint32_t status, evtctr; 6402 uint32_t ha_copy, hc_copy; 6403 int i; 6404 unsigned long timeout; 6405 unsigned long drvr_flag = 0; 6406 uint32_t word0, ldata; 6407 void __iomem *to_slim; 6408 int processing_queue = 0; 6409 6410 spin_lock_irqsave(&phba->hbalock, drvr_flag); 6411 if (!pmbox) { 6412 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 6413 /* processing mbox queue from intr_handler */ 6414 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { 6415 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6416 return MBX_SUCCESS; 6417 } 6418 processing_queue = 1; 6419 pmbox = lpfc_mbox_get(phba); 6420 if (!pmbox) { 6421 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6422 return MBX_SUCCESS; 6423 } 6424 } 6425 6426 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl && 6427 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) { 6428 if(!pmbox->vport) { 6429 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6430 lpfc_printf_log(phba, KERN_ERR, 6431 LOG_MBOX | LOG_VPORT, 6432 "1806 Mbox x%x failed. No vport\n", 6433 pmbox->u.mb.mbxCommand); 6434 dump_stack(); 6435 goto out_not_finished; 6436 } 6437 } 6438 6439 /* If the PCI channel is in offline state, do not post mbox. */ 6440 if (unlikely(pci_channel_offline(phba->pcidev))) { 6441 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6442 goto out_not_finished; 6443 } 6444 6445 /* If HBA has a deferred error attention, fail the iocb. */ 6446 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 6447 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6448 goto out_not_finished; 6449 } 6450 6451 psli = &phba->sli; 6452 6453 mb = &pmbox->u.mb; 6454 status = MBX_SUCCESS; 6455 6456 if (phba->link_state == LPFC_HBA_ERROR) { 6457 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6458 6459 /* Mbox command <mbxCommand> cannot issue */ 6460 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6461 "(%d):0311 Mailbox command x%x cannot " 6462 "issue Data: x%x x%x\n", 6463 pmbox->vport ? pmbox->vport->vpi : 0, 6464 pmbox->u.mb.mbxCommand, psli->sli_flag, flag); 6465 goto out_not_finished; 6466 } 6467 6468 if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) { 6469 if (lpfc_readl(phba->HCregaddr, &hc_copy) || 6470 !(hc_copy & HC_MBINT_ENA)) { 6471 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6472 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6473 "(%d):2528 Mailbox command x%x cannot " 6474 "issue Data: x%x x%x\n", 6475 pmbox->vport ? pmbox->vport->vpi : 0, 6476 pmbox->u.mb.mbxCommand, psli->sli_flag, flag); 6477 goto out_not_finished; 6478 } 6479 } 6480 6481 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 6482 /* Polling for a mbox command when another one is already active 6483 * is not allowed in SLI. Also, the driver must have established 6484 * SLI2 mode to queue and process multiple mbox commands. 6485 */ 6486 6487 if (flag & MBX_POLL) { 6488 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6489 6490 /* Mbox command <mbxCommand> cannot issue */ 6491 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6492 "(%d):2529 Mailbox command x%x " 6493 "cannot issue Data: x%x x%x\n", 6494 pmbox->vport ? 
pmbox->vport->vpi : 0, 6495 pmbox->u.mb.mbxCommand, 6496 psli->sli_flag, flag); 6497 goto out_not_finished; 6498 } 6499 6500 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) { 6501 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6502 /* Mbox command <mbxCommand> cannot issue */ 6503 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6504 "(%d):2530 Mailbox command x%x " 6505 "cannot issue Data: x%x x%x\n", 6506 pmbox->vport ? pmbox->vport->vpi : 0, 6507 pmbox->u.mb.mbxCommand, 6508 psli->sli_flag, flag); 6509 goto out_not_finished; 6510 } 6511 6512 /* Another mailbox command is still being processed, queue this 6513 * command to be processed later. 6514 */ 6515 lpfc_mbox_put(phba, pmbox); 6516 6517 /* Mbox cmd issue - BUSY */ 6518 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 6519 "(%d):0308 Mbox cmd issue - BUSY Data: " 6520 "x%x x%x x%x x%x\n", 6521 pmbox->vport ? pmbox->vport->vpi : 0xffffff, 6522 mb->mbxCommand, phba->pport->port_state, 6523 psli->sli_flag, flag); 6524 6525 psli->slistat.mbox_busy++; 6526 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6527 6528 if (pmbox->vport) { 6529 lpfc_debugfs_disc_trc(pmbox->vport, 6530 LPFC_DISC_TRC_MBOX_VPORT, 6531 "MBOX Bsy vport: cmd:x%x mb:x%x x%x", 6532 (uint32_t)mb->mbxCommand, 6533 mb->un.varWords[0], mb->un.varWords[1]); 6534 } 6535 else { 6536 lpfc_debugfs_disc_trc(phba->pport, 6537 LPFC_DISC_TRC_MBOX, 6538 "MBOX Bsy: cmd:x%x mb:x%x x%x", 6539 (uint32_t)mb->mbxCommand, 6540 mb->un.varWords[0], mb->un.varWords[1]); 6541 } 6542 6543 return MBX_BUSY; 6544 } 6545 6546 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 6547 6548 /* If we are not polling, we MUST be in SLI2 mode */ 6549 if (flag != MBX_POLL) { 6550 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) && 6551 (mb->mbxCommand != MBX_KILL_BOARD)) { 6552 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 6553 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6554 /* Mbox command <mbxCommand> cannot issue */ 6555 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6556 "(%d):2531 Mailbox command x%x " 6557 "cannot issue Data: x%x x%x\n", 6558 pmbox->vport ? pmbox->vport->vpi : 0, 6559 pmbox->u.mb.mbxCommand, 6560 psli->sli_flag, flag); 6561 goto out_not_finished; 6562 } 6563 /* timeout active mbox command */ 6564 mod_timer(&psli->mbox_tmo, (jiffies + 6565 (HZ * lpfc_mbox_tmo_val(phba, pmbox)))); 6566 } 6567 6568 /* Mailbox cmd <cmd> issue */ 6569 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 6570 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x " 6571 "x%x\n", 6572 pmbox->vport ? pmbox->vport->vpi : 0, 6573 mb->mbxCommand, phba->pport->port_state, 6574 psli->sli_flag, flag); 6575 6576 if (mb->mbxCommand != MBX_HEARTBEAT) { 6577 if (pmbox->vport) { 6578 lpfc_debugfs_disc_trc(pmbox->vport, 6579 LPFC_DISC_TRC_MBOX_VPORT, 6580 "MBOX Send vport: cmd:x%x mb:x%x x%x", 6581 (uint32_t)mb->mbxCommand, 6582 mb->un.varWords[0], mb->un.varWords[1]); 6583 } 6584 else { 6585 lpfc_debugfs_disc_trc(phba->pport, 6586 LPFC_DISC_TRC_MBOX, 6587 "MBOX Send: cmd:x%x mb:x%x x%x", 6588 (uint32_t)mb->mbxCommand, 6589 mb->un.varWords[0], mb->un.varWords[1]); 6590 } 6591 } 6592 6593 psli->slistat.mbox_cmd++; 6594 evtctr = psli->slistat.mbox_event; 6595 6596 /* next set own bit for the adapter and copy over command word */ 6597 mb->mbxOwner = OWN_CHIP; 6598 6599 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 6600 /* Populate mbox extension offset word. 
*/ 6601 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) { 6602 *(((uint32_t *)mb) + pmbox->mbox_offset_word) 6603 = (uint8_t *)phba->mbox_ext 6604 - (uint8_t *)phba->mbox; 6605 } 6606 6607 /* Copy the mailbox extension data */ 6608 if (pmbox->in_ext_byte_len && pmbox->context2) { 6609 lpfc_sli_pcimem_bcopy(pmbox->context2, 6610 (uint8_t *)phba->mbox_ext, 6611 pmbox->in_ext_byte_len); 6612 } 6613 /* Copy command data to host SLIM area */ 6614 lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE); 6615 } else { 6616 /* Populate mbox extension offset word. */ 6617 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) 6618 *(((uint32_t *)mb) + pmbox->mbox_offset_word) 6619 = MAILBOX_HBA_EXT_OFFSET; 6620 6621 /* Copy the mailbox extension data */ 6622 if (pmbox->in_ext_byte_len && pmbox->context2) { 6623 lpfc_memcpy_to_slim(phba->MBslimaddr + 6624 MAILBOX_HBA_EXT_OFFSET, 6625 pmbox->context2, pmbox->in_ext_byte_len); 6626 6627 } 6628 if (mb->mbxCommand == MBX_CONFIG_PORT) { 6629 /* copy command data into host mbox for cmpl */ 6630 lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE); 6631 } 6632 6633 /* First copy mbox command data to HBA SLIM, skip past first 6634 word */ 6635 to_slim = phba->MBslimaddr + sizeof (uint32_t); 6636 lpfc_memcpy_to_slim(to_slim, &mb->un.varWords[0], 6637 MAILBOX_CMD_SIZE - sizeof (uint32_t)); 6638 6639 /* Next copy over first word, with mbxOwner set */ 6640 ldata = *((uint32_t *)mb); 6641 to_slim = phba->MBslimaddr; 6642 writel(ldata, to_slim); 6643 readl(to_slim); /* flush */ 6644 6645 if (mb->mbxCommand == MBX_CONFIG_PORT) { 6646 /* switch over to host mailbox */ 6647 psli->sli_flag |= LPFC_SLI_ACTIVE; 6648 } 6649 } 6650 6651 wmb(); 6652 6653 switch (flag) { 6654 case MBX_NOWAIT: 6655 /* Set up reference to mailbox command */ 6656 psli->mbox_active = pmbox; 6657 /* Interrupt board to do it */ 6658 writel(CA_MBATT, phba->CAregaddr); 6659 readl(phba->CAregaddr); /* flush */ 6660 /* Don't wait for it to finish, just return */ 6661 break; 6662 6663 case MBX_POLL: 6664 /* Set up null reference to mailbox command */ 6665 psli->mbox_active = NULL; 6666 /* Interrupt board to do it */ 6667 writel(CA_MBATT, phba->CAregaddr); 6668 readl(phba->CAregaddr); /* flush */ 6669 6670 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 6671 /* First read mbox status word */ 6672 word0 = *((uint32_t *)phba->mbox); 6673 word0 = le32_to_cpu(word0); 6674 } else { 6675 /* First read mbox status word */ 6676 if (lpfc_readl(phba->MBslimaddr, &word0)) { 6677 spin_unlock_irqrestore(&phba->hbalock, 6678 drvr_flag); 6679 goto out_not_finished; 6680 } 6681 } 6682 6683 /* Read the HBA Host Attention Register */ 6684 if (lpfc_readl(phba->HAregaddr, &ha_copy)) { 6685 spin_unlock_irqrestore(&phba->hbalock, 6686 drvr_flag); 6687 goto out_not_finished; 6688 } 6689 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) * 6690 1000) + jiffies; 6691 i = 0; 6692 /* Wait for command to complete */ 6693 while (((word0 & OWN_CHIP) == OWN_CHIP) || 6694 (!(ha_copy & HA_MBATT) && 6695 (phba->link_state > LPFC_WARM_START))) { 6696 if (time_after(jiffies, timeout)) { 6697 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 6698 spin_unlock_irqrestore(&phba->hbalock, 6699 drvr_flag); 6700 goto out_not_finished; 6701 } 6702 6703 /* Check if we took a mbox interrupt while we were 6704 polling */ 6705 if (((word0 & OWN_CHIP) != OWN_CHIP) 6706 && (evtctr != psli->slistat.mbox_event)) 6707 break; 6708 6709 if (i++ > 10) { 6710 spin_unlock_irqrestore(&phba->hbalock, 6711 drvr_flag); 6712 msleep(1); 6713 
spin_lock_irqsave(&phba->hbalock, drvr_flag); 6714 } 6715 6716 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 6717 /* First copy command data */ 6718 word0 = *((uint32_t *)phba->mbox); 6719 word0 = le32_to_cpu(word0); 6720 if (mb->mbxCommand == MBX_CONFIG_PORT) { 6721 MAILBOX_t *slimmb; 6722 uint32_t slimword0; 6723 /* Check real SLIM for any errors */ 6724 slimword0 = readl(phba->MBslimaddr); 6725 slimmb = (MAILBOX_t *) & slimword0; 6726 if (((slimword0 & OWN_CHIP) != OWN_CHIP) 6727 && slimmb->mbxStatus) { 6728 psli->sli_flag &= 6729 ~LPFC_SLI_ACTIVE; 6730 word0 = slimword0; 6731 } 6732 } 6733 } else { 6734 /* First copy command data */ 6735 word0 = readl(phba->MBslimaddr); 6736 } 6737 /* Read the HBA Host Attention Register */ 6738 if (lpfc_readl(phba->HAregaddr, &ha_copy)) { 6739 spin_unlock_irqrestore(&phba->hbalock, 6740 drvr_flag); 6741 goto out_not_finished; 6742 } 6743 } 6744 6745 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 6746 /* copy results back to user */ 6747 lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE); 6748 /* Copy the mailbox extension data */ 6749 if (pmbox->out_ext_byte_len && pmbox->context2) { 6750 lpfc_sli_pcimem_bcopy(phba->mbox_ext, 6751 pmbox->context2, 6752 pmbox->out_ext_byte_len); 6753 } 6754 } else { 6755 /* First copy command data */ 6756 lpfc_memcpy_from_slim(mb, phba->MBslimaddr, 6757 MAILBOX_CMD_SIZE); 6758 /* Copy the mailbox extension data */ 6759 if (pmbox->out_ext_byte_len && pmbox->context2) { 6760 lpfc_memcpy_from_slim(pmbox->context2, 6761 phba->MBslimaddr + 6762 MAILBOX_HBA_EXT_OFFSET, 6763 pmbox->out_ext_byte_len); 6764 } 6765 } 6766 6767 writel(HA_MBATT, phba->HAregaddr); 6768 readl(phba->HAregaddr); /* flush */ 6769 6770 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 6771 status = mb->mbxStatus; 6772 } 6773 6774 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6775 return status; 6776 6777 out_not_finished: 6778 if (processing_queue) { 6779 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED; 6780 lpfc_mbox_cmpl_put(phba, pmbox); 6781 } 6782 return MBX_NOT_FINISHED; 6783 } 6784 6785 /** 6786 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command 6787 * @phba: Pointer to HBA context object. 6788 * 6789 * The function blocks the posting of SLI4 asynchronous mailbox commands from 6790 * the driver internal pending mailbox queue. It will then try to wait out the 6791 * possible outstanding mailbox command before return. 6792 * 6793 * Returns: 6794 * 0 - the outstanding mailbox command completed; otherwise, the wait for 6795 * the outstanding mailbox command timed out. 6796 **/ 6797 static int 6798 lpfc_sli4_async_mbox_block(struct lpfc_hba *phba) 6799 { 6800 struct lpfc_sli *psli = &phba->sli; 6801 int rc = 0; 6802 unsigned long timeout = 0; 6803 6804 /* Mark the asynchronous mailbox command posting as blocked */ 6805 spin_lock_irq(&phba->hbalock); 6806 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 6807 /* Determine how long we might wait for the active mailbox 6808 * command to be gracefully completed by firmware. 
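 * The wait budget is the active command's own mailbox timeout
 * (lpfc_mbox_tmo_val), converted to jiffies.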
6809 */ 6810 if (phba->sli.mbox_active) 6811 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, 6812 phba->sli.mbox_active) * 6813 1000) + jiffies; 6814 spin_unlock_irq(&phba->hbalock); 6815 6816 /* Wait for the outstanding mailbox command to complete */ 6817 while (phba->sli.mbox_active) { 6818 /* Check active mailbox complete status every 2ms */ 6819 msleep(2); 6820 if (time_after(jiffies, timeout)) { 6821 /* Timeout, mark the outstanding cmd not complete */ 6822 rc = 1; 6823 break; 6824 } 6825 } 6826 6827 /* Could not cleanly block async mailbox commands, fail it */ 6828 if (rc) { 6829 spin_lock_irq(&phba->hbalock); 6830 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 6831 spin_unlock_irq(&phba->hbalock); 6832 } 6833 return rc; 6834 } 6835 6836 /** 6837 * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command 6838 * @phba: Pointer to HBA context object. 6839 * 6840 * The function unblocks and resumes posting of SLI4 asynchronous mailbox 6841 * commands from the driver internal pending mailbox queue. It makes sure 6842 * that there is no outstanding mailbox command before resuming posting 6843 * asynchronous mailbox commands. If, for any reason, there is an outstanding 6844 * mailbox command, it will try to wait it out before resuming asynchronous 6845 * mailbox command posting. 6846 **/ 6847 static void 6848 lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba) 6849 { 6850 struct lpfc_sli *psli = &phba->sli; 6851 6852 spin_lock_irq(&phba->hbalock); 6853 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { 6854 /* Asynchronous mailbox posting is not blocked, do nothing */ 6855 spin_unlock_irq(&phba->hbalock); 6856 return; 6857 } 6858 6859 /* The outstanding synchronous mailbox command is guaranteed to be 6860 * done, either successfully or by timeout; a timed-out command is 6861 * always removed, so just unblock posting of async mailbox commands 6862 * and resume. 6863 */ 6864 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 6865 spin_unlock_irq(&phba->hbalock); 6866 6867 /* wake up worker thread to post asynchronous mailbox command */ 6868 lpfc_worker_wake_up(phba); 6869 } 6870 6871 /** 6872 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox 6873 * @phba: Pointer to HBA context object. 6874 * @mboxq: Pointer to mailbox object. 6875 * 6876 * The function posts a mailbox to the port. The mailbox is expected 6877 * to be completely filled in and ready for the port to operate on it. 6878 * This routine executes a synchronous completion operation on the 6879 * mailbox by polling for its completion. 6880 * 6881 * The caller must not be holding any locks when calling this routine. 6882 * 6883 * Returns: 6884 * MBX_SUCCESS - mailbox posted successfully 6885 * Any of the MBX error values. 6886 **/ 6887 static int 6888 lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 6889 { 6890 int rc = MBX_SUCCESS; 6891 unsigned long iflag; 6892 uint32_t db_ready; 6893 uint32_t mcqe_status; 6894 uint32_t mbx_cmnd; 6895 unsigned long timeout; 6896 struct lpfc_sli *psli = &phba->sli; 6897 struct lpfc_mqe *mb = &mboxq->u.mqe; 6898 struct lpfc_bmbx_create *mbox_rgn; 6899 struct dma_address *dma_address; 6900 struct lpfc_register bmbx_reg; 6901 6902 /* 6903 * Only one mailbox can be active to the bootstrap mailbox region 6904 * at a time and there is no queueing provided.
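 * The handshake below is: take the mailbox-active token, copy the MQE into
 * the bootstrap region, write the high and then the low half of the region's
 * DMA address to the BMBX register (polling lpfc_bmbx_rdy after each write),
 * and finally read the MCQE status back out of the bootstrap region.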
6905 */ 6906 spin_lock_irqsave(&phba->hbalock, iflag); 6907 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 6908 spin_unlock_irqrestore(&phba->hbalock, iflag); 6909 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6910 "(%d):2532 Mailbox command x%x (x%x/x%x) " 6911 "cannot issue Data: x%x x%x\n", 6912 mboxq->vport ? mboxq->vport->vpi : 0, 6913 mboxq->u.mb.mbxCommand, 6914 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 6915 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 6916 psli->sli_flag, MBX_POLL); 6917 return MBXERR_ERROR; 6918 } 6919 /* The server grabs the token and owns it until release */ 6920 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 6921 phba->sli.mbox_active = mboxq; 6922 spin_unlock_irqrestore(&phba->hbalock, iflag); 6923 6924 /* 6925 * Initialize the bootstrap memory region to avoid stale data areas 6926 * in the mailbox post. Then copy the caller's mailbox contents to 6927 * the bmbx mailbox region. 6928 */ 6929 mbx_cmnd = bf_get(lpfc_mqe_command, mb); 6930 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create)); 6931 lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt, 6932 sizeof(struct lpfc_mqe)); 6933 6934 /* Post the high mailbox dma address to the port and wait for ready. */ 6935 dma_address = &phba->sli4_hba.bmbx.dma_address; 6936 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr); 6937 6938 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq) 6939 * 1000) + jiffies; 6940 do { 6941 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr); 6942 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg); 6943 if (!db_ready) 6944 msleep(2); 6945 6946 if (time_after(jiffies, timeout)) { 6947 rc = MBXERR_ERROR; 6948 goto exit; 6949 } 6950 } while (!db_ready); 6951 6952 /* Post the low mailbox dma address to the port. */ 6953 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr); 6954 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq) 6955 * 1000) + jiffies; 6956 do { 6957 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr); 6958 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg); 6959 if (!db_ready) 6960 msleep(2); 6961 6962 if (time_after(jiffies, timeout)) { 6963 rc = MBXERR_ERROR; 6964 goto exit; 6965 } 6966 } while (!db_ready); 6967 6968 /* 6969 * Read the CQ to ensure the mailbox has completed. 6970 * If so, update the mailbox status so that the upper layers 6971 * can complete the request normally. 6972 */ 6973 lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb, 6974 sizeof(struct lpfc_mqe)); 6975 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt; 6976 lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe, 6977 sizeof(struct lpfc_mcqe)); 6978 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe); 6979 /* 6980 * When the CQE status indicates a failure and the mailbox status 6981 * indicates success then copy the CQE status into the mailbox status 6982 * (and prefix it with x4000). 6983 */ 6984 if (mcqe_status != MB_CQE_STATUS_SUCCESS) { 6985 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS) 6986 bf_set(lpfc_mqe_status, mb, 6987 (LPFC_MBX_ERROR_RANGE | mcqe_status)); 6988 rc = MBXERR_ERROR; 6989 } else 6990 lpfc_sli4_swap_str(phba, mboxq); 6991 6992 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 6993 "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x " 6994 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x" 6995 " x%x x%x CQ: x%x x%x x%x x%x\n", 6996 mboxq->vport ? 
mboxq->vport->vpi : 0, mbx_cmnd, 6997 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 6998 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 6999 bf_get(lpfc_mqe_status, mb), 7000 mb->un.mb_words[0], mb->un.mb_words[1], 7001 mb->un.mb_words[2], mb->un.mb_words[3], 7002 mb->un.mb_words[4], mb->un.mb_words[5], 7003 mb->un.mb_words[6], mb->un.mb_words[7], 7004 mb->un.mb_words[8], mb->un.mb_words[9], 7005 mb->un.mb_words[10], mb->un.mb_words[11], 7006 mb->un.mb_words[12], mboxq->mcqe.word0, 7007 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, 7008 mboxq->mcqe.trailer); 7009 exit: 7010 /* We are holding the token, no needed for lock when release */ 7011 spin_lock_irqsave(&phba->hbalock, iflag); 7012 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 7013 phba->sli.mbox_active = NULL; 7014 spin_unlock_irqrestore(&phba->hbalock, iflag); 7015 return rc; 7016 } 7017 7018 /** 7019 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware 7020 * @phba: Pointer to HBA context object. 7021 * @pmbox: Pointer to mailbox object. 7022 * @flag: Flag indicating how the mailbox need to be processed. 7023 * 7024 * This function is called by discovery code and HBA management code to submit 7025 * a mailbox command to firmware with SLI-4 interface spec. 7026 * 7027 * Return codes the caller owns the mailbox command after the return of the 7028 * function. 7029 **/ 7030 static int 7031 lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, 7032 uint32_t flag) 7033 { 7034 struct lpfc_sli *psli = &phba->sli; 7035 unsigned long iflags; 7036 int rc; 7037 7038 /* dump from issue mailbox command if setup */ 7039 lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb); 7040 7041 rc = lpfc_mbox_dev_check(phba); 7042 if (unlikely(rc)) { 7043 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7044 "(%d):2544 Mailbox command x%x (x%x/x%x) " 7045 "cannot issue Data: x%x x%x\n", 7046 mboxq->vport ? mboxq->vport->vpi : 0, 7047 mboxq->u.mb.mbxCommand, 7048 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 7049 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 7050 psli->sli_flag, flag); 7051 goto out_not_finished; 7052 } 7053 7054 /* Detect polling mode and jump to a handler */ 7055 if (!phba->sli4_hba.intr_enable) { 7056 if (flag == MBX_POLL) 7057 rc = lpfc_sli4_post_sync_mbox(phba, mboxq); 7058 else 7059 rc = -EIO; 7060 if (rc != MBX_SUCCESS) 7061 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 7062 "(%d):2541 Mailbox command x%x " 7063 "(x%x/x%x) cannot issue Data: " 7064 "x%x x%x\n", 7065 mboxq->vport ? mboxq->vport->vpi : 0, 7066 mboxq->u.mb.mbxCommand, 7067 lpfc_sli_config_mbox_subsys_get(phba, 7068 mboxq), 7069 lpfc_sli_config_mbox_opcode_get(phba, 7070 mboxq), 7071 psli->sli_flag, flag); 7072 return rc; 7073 } else if (flag == MBX_POLL) { 7074 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 7075 "(%d):2542 Try to issue mailbox command " 7076 "x%x (x%x/x%x) synchronously ahead of async" 7077 "mailbox command queue: x%x x%x\n", 7078 mboxq->vport ? 
mboxq->vport->vpi : 0, 7079 mboxq->u.mb.mbxCommand, 7080 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 7081 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 7082 psli->sli_flag, flag); 7083 /* Try to block the asynchronous mailbox posting */ 7084 rc = lpfc_sli4_async_mbox_block(phba); 7085 if (!rc) { 7086 /* Successfully blocked, now issue sync mbox cmd */ 7087 rc = lpfc_sli4_post_sync_mbox(phba, mboxq); 7088 if (rc != MBX_SUCCESS) 7089 lpfc_printf_log(phba, KERN_ERR, 7090 LOG_MBOX | LOG_SLI, 7091 "(%d):2597 Mailbox command " 7092 "x%x (x%x/x%x) cannot issue " 7093 "Data: x%x x%x\n", 7094 mboxq->vport ? 7095 mboxq->vport->vpi : 0, 7096 mboxq->u.mb.mbxCommand, 7097 lpfc_sli_config_mbox_subsys_get(phba, 7098 mboxq), 7099 lpfc_sli_config_mbox_opcode_get(phba, 7100 mboxq), 7101 psli->sli_flag, flag); 7102 /* Unblock the async mailbox posting afterward */ 7103 lpfc_sli4_async_mbox_unblock(phba); 7104 } 7105 return rc; 7106 } 7107 7108 /* Now, interrupt mode asynchrous mailbox command */ 7109 rc = lpfc_mbox_cmd_check(phba, mboxq); 7110 if (rc) { 7111 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7112 "(%d):2543 Mailbox command x%x (x%x/x%x) " 7113 "cannot issue Data: x%x x%x\n", 7114 mboxq->vport ? mboxq->vport->vpi : 0, 7115 mboxq->u.mb.mbxCommand, 7116 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 7117 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 7118 psli->sli_flag, flag); 7119 goto out_not_finished; 7120 } 7121 7122 /* Put the mailbox command to the driver internal FIFO */ 7123 psli->slistat.mbox_busy++; 7124 spin_lock_irqsave(&phba->hbalock, iflags); 7125 lpfc_mbox_put(phba, mboxq); 7126 spin_unlock_irqrestore(&phba->hbalock, iflags); 7127 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 7128 "(%d):0354 Mbox cmd issue - Enqueue Data: " 7129 "x%x (x%x/x%x) x%x x%x x%x\n", 7130 mboxq->vport ? mboxq->vport->vpi : 0xffffff, 7131 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 7132 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 7133 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 7134 phba->pport->port_state, 7135 psli->sli_flag, MBX_NOWAIT); 7136 /* Wake up worker thread to transport mailbox command from head */ 7137 lpfc_worker_wake_up(phba); 7138 7139 return MBX_BUSY; 7140 7141 out_not_finished: 7142 return MBX_NOT_FINISHED; 7143 } 7144 7145 /** 7146 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device 7147 * @phba: Pointer to HBA context object. 7148 * 7149 * This function is called by worker thread to send a mailbox command to 7150 * SLI4 HBA firmware. 
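 *
 * Returns:
 * MBX_SUCCESS - a pending mailbox command was posted to the port, or no
 * command was waiting to be posted.
 * MBX_NOT_FINISHED - posting is blocked, another mailbox command is still
 * active, the device check failed, or the port rejected the entry.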
7151 * 7152 **/ 7153 int 7154 lpfc_sli4_post_async_mbox(struct lpfc_hba *phba) 7155 { 7156 struct lpfc_sli *psli = &phba->sli; 7157 LPFC_MBOXQ_t *mboxq; 7158 int rc = MBX_SUCCESS; 7159 unsigned long iflags; 7160 struct lpfc_mqe *mqe; 7161 uint32_t mbx_cmnd; 7162 7163 /* Check interrupt mode before post async mailbox command */ 7164 if (unlikely(!phba->sli4_hba.intr_enable)) 7165 return MBX_NOT_FINISHED; 7166 7167 /* Check for mailbox command service token */ 7168 spin_lock_irqsave(&phba->hbalock, iflags); 7169 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { 7170 spin_unlock_irqrestore(&phba->hbalock, iflags); 7171 return MBX_NOT_FINISHED; 7172 } 7173 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 7174 spin_unlock_irqrestore(&phba->hbalock, iflags); 7175 return MBX_NOT_FINISHED; 7176 } 7177 if (unlikely(phba->sli.mbox_active)) { 7178 spin_unlock_irqrestore(&phba->hbalock, iflags); 7179 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7180 "0384 There is pending active mailbox cmd\n"); 7181 return MBX_NOT_FINISHED; 7182 } 7183 /* Take the mailbox command service token */ 7184 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 7185 7186 /* Get the next mailbox command from head of queue */ 7187 mboxq = lpfc_mbox_get(phba); 7188 7189 /* If no more mailbox command waiting for post, we're done */ 7190 if (!mboxq) { 7191 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 7192 spin_unlock_irqrestore(&phba->hbalock, iflags); 7193 return MBX_SUCCESS; 7194 } 7195 phba->sli.mbox_active = mboxq; 7196 spin_unlock_irqrestore(&phba->hbalock, iflags); 7197 7198 /* Check device readiness for posting mailbox command */ 7199 rc = lpfc_mbox_dev_check(phba); 7200 if (unlikely(rc)) 7201 /* Driver clean routine will clean up pending mailbox */ 7202 goto out_not_finished; 7203 7204 /* Prepare the mbox command to be posted */ 7205 mqe = &mboxq->u.mqe; 7206 mbx_cmnd = bf_get(lpfc_mqe_command, mqe); 7207 7208 /* Start timer for the mbox_tmo and log some mailbox post messages */ 7209 mod_timer(&psli->mbox_tmo, (jiffies + 7210 (HZ * lpfc_mbox_tmo_val(phba, mboxq)))); 7211 7212 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 7213 "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: " 7214 "x%x x%x\n", 7215 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd, 7216 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 7217 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 7218 phba->pport->port_state, psli->sli_flag); 7219 7220 if (mbx_cmnd != MBX_HEARTBEAT) { 7221 if (mboxq->vport) { 7222 lpfc_debugfs_disc_trc(mboxq->vport, 7223 LPFC_DISC_TRC_MBOX_VPORT, 7224 "MBOX Send vport: cmd:x%x mb:x%x x%x", 7225 mbx_cmnd, mqe->un.mb_words[0], 7226 mqe->un.mb_words[1]); 7227 } else { 7228 lpfc_debugfs_disc_trc(phba->pport, 7229 LPFC_DISC_TRC_MBOX, 7230 "MBOX Send: cmd:x%x mb:x%x x%x", 7231 mbx_cmnd, mqe->un.mb_words[0], 7232 mqe->un.mb_words[1]); 7233 } 7234 } 7235 psli->slistat.mbox_cmd++; 7236 7237 /* Post the mailbox command to the port */ 7238 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe); 7239 if (rc != MBX_SUCCESS) { 7240 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7241 "(%d):2533 Mailbox command x%x (x%x/x%x) " 7242 "cannot issue Data: x%x x%x\n", 7243 mboxq->vport ? 
mboxq->vport->vpi : 0, 7244 mboxq->u.mb.mbxCommand, 7245 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 7246 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 7247 psli->sli_flag, MBX_NOWAIT); 7248 goto out_not_finished; 7249 } 7250 7251 return rc; 7252 7253 out_not_finished: 7254 spin_lock_irqsave(&phba->hbalock, iflags); 7255 if (phba->sli.mbox_active) { 7256 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; 7257 __lpfc_mbox_cmpl_put(phba, mboxq); 7258 /* Release the token */ 7259 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 7260 phba->sli.mbox_active = NULL; 7261 } 7262 spin_unlock_irqrestore(&phba->hbalock, iflags); 7263 7264 return MBX_NOT_FINISHED; 7265 } 7266 7267 /** 7268 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command 7269 * @phba: Pointer to HBA context object. 7270 * @pmbox: Pointer to mailbox object. 7271 * @flag: Flag indicating how the mailbox need to be processed. 7272 * 7273 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from 7274 * the API jump table function pointer from the lpfc_hba struct. 7275 * 7276 * Return codes the caller owns the mailbox command after the return of the 7277 * function. 7278 **/ 7279 int 7280 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) 7281 { 7282 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag); 7283 } 7284 7285 /** 7286 * lpfc_mbox_api_table_setup - Set up mbox api function jump table 7287 * @phba: The hba struct for which this call is being executed. 7288 * @dev_grp: The HBA PCI-Device group number. 7289 * 7290 * This routine sets up the mbox interface API function jump table in @phba 7291 * struct. 7292 * Returns: 0 - success, -ENODEV - failure. 7293 **/ 7294 int 7295 lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 7296 { 7297 7298 switch (dev_grp) { 7299 case LPFC_PCI_DEV_LP: 7300 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3; 7301 phba->lpfc_sli_handle_slow_ring_event = 7302 lpfc_sli_handle_slow_ring_event_s3; 7303 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3; 7304 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3; 7305 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3; 7306 break; 7307 case LPFC_PCI_DEV_OC: 7308 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4; 7309 phba->lpfc_sli_handle_slow_ring_event = 7310 lpfc_sli_handle_slow_ring_event_s4; 7311 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4; 7312 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4; 7313 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4; 7314 break; 7315 default: 7316 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7317 "1420 Invalid HBA PCI-device group: 0x%x\n", 7318 dev_grp); 7319 return -ENODEV; 7320 break; 7321 } 7322 return 0; 7323 } 7324 7325 /** 7326 * __lpfc_sli_ringtx_put - Add an iocb to the txq 7327 * @phba: Pointer to HBA context object. 7328 * @pring: Pointer to driver SLI ring object. 7329 * @piocb: Pointer to address of newly added command iocb. 7330 * 7331 * This function is called with hbalock held to add a command 7332 * iocb to the txq when SLI layer cannot submit the command iocb 7333 * to the ring. 7334 **/ 7335 void 7336 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 7337 struct lpfc_iocbq *piocb) 7338 { 7339 /* Insert the caller's iocb in the txq tail for later processing. */ 7340 list_add_tail(&piocb->list, &pring->txq); 7341 pring->txq_cnt++; 7342 } 7343 7344 /** 7345 * lpfc_sli_next_iocb - Get the next iocb in the txq 7346 * @phba: Pointer to HBA context object. 7347 * @pring: Pointer to driver SLI ring object. 
7348 * @piocb: Pointer to address of newly added command iocb. 7349 * 7350 * This function is called with hbalock held before a new 7351 * iocb is submitted to the firmware. This function checks 7352 * txq to flush the iocbs in txq to Firmware before 7353 * submitting new iocbs to the Firmware. 7354 * If there are iocbs in the txq which need to be submitted 7355 * to firmware, lpfc_sli_next_iocb returns the first element 7356 * of the txq after dequeuing it from txq. 7357 * If there is no iocb in the txq then the function will return 7358 * *piocb and *piocb is set to NULL. Caller needs to check 7359 * *piocb to find if there are more commands in the txq. 7360 **/ 7361 static struct lpfc_iocbq * 7362 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 7363 struct lpfc_iocbq **piocb) 7364 { 7365 struct lpfc_iocbq * nextiocb; 7366 7367 nextiocb = lpfc_sli_ringtx_get(phba, pring); 7368 if (!nextiocb) { 7369 nextiocb = *piocb; 7370 *piocb = NULL; 7371 } 7372 7373 return nextiocb; 7374 } 7375 7376 /** 7377 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb 7378 * @phba: Pointer to HBA context object. 7379 * @ring_number: SLI ring number to issue iocb on. 7380 * @piocb: Pointer to command iocb. 7381 * @flag: Flag indicating if this command can be put into txq. 7382 * 7383 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue 7384 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is 7385 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT 7386 * flag is turned on, the function returns IOCB_ERROR. When the link is down, 7387 * this function allows only iocbs for posting buffers. This function finds 7388 * next available slot in the command ring and posts the command to the 7389 * available slot and writes the port attention register to request HBA start 7390 * processing new iocb. If there is no slot available in the ring and 7391 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise 7392 * the function returns IOCB_BUSY. 7393 * 7394 * This function is called with hbalock held. The function will return success 7395 * after it successfully submit the iocb to firmware or after adding to the 7396 * txq. 7397 **/ 7398 static int 7399 __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number, 7400 struct lpfc_iocbq *piocb, uint32_t flag) 7401 { 7402 struct lpfc_iocbq *nextiocb; 7403 IOCB_t *iocb; 7404 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number]; 7405 7406 if (piocb->iocb_cmpl && (!piocb->vport) && 7407 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && 7408 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) { 7409 lpfc_printf_log(phba, KERN_ERR, 7410 LOG_SLI | LOG_VPORT, 7411 "1807 IOCB x%x failed. No vport\n", 7412 piocb->iocb.ulpCommand); 7413 dump_stack(); 7414 return IOCB_ERROR; 7415 } 7416 7417 7418 /* If the PCI channel is in offline state, do not post iocbs. */ 7419 if (unlikely(pci_channel_offline(phba->pcidev))) 7420 return IOCB_ERROR; 7421 7422 /* If HBA has a deferred error attention, fail the iocb. */ 7423 if (unlikely(phba->hba_flag & DEFER_ERATT)) 7424 return IOCB_ERROR; 7425 7426 /* 7427 * We should never get an IOCB if we are in a < LINK_DOWN state 7428 */ 7429 if (unlikely(phba->link_state < LPFC_LINK_DOWN)) 7430 return IOCB_ERROR; 7431 7432 /* 7433 * Check to see if we are blocking IOCB processing because of a 7434 * outstanding event. 
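* (This is the LPFC_STOP_IOCB_EVENT check made just below; while that flag
* is set the iocb is either queued to the txq or returned IOCB_BUSY,
* depending on SLI_IOCB_RET_IOCB.)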
7435 */ 7436 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT)) 7437 goto iocb_busy; 7438 7439 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) { 7440 /* 7441 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF 7442 * can be issued if the link is not up. 7443 */ 7444 switch (piocb->iocb.ulpCommand) { 7445 case CMD_GEN_REQUEST64_CR: 7446 case CMD_GEN_REQUEST64_CX: 7447 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) || 7448 (piocb->iocb.un.genreq64.w5.hcsw.Rctl != 7449 FC_RCTL_DD_UNSOL_CMD) || 7450 (piocb->iocb.un.genreq64.w5.hcsw.Type != 7451 MENLO_TRANSPORT_TYPE)) 7452 7453 goto iocb_busy; 7454 break; 7455 case CMD_QUE_RING_BUF_CN: 7456 case CMD_QUE_RING_BUF64_CN: 7457 /* 7458 * For IOCBs, like QUE_RING_BUF, that have no rsp ring 7459 * completion, iocb_cmpl MUST be 0. 7460 */ 7461 if (piocb->iocb_cmpl) 7462 piocb->iocb_cmpl = NULL; 7463 /*FALLTHROUGH*/ 7464 case CMD_CREATE_XRI_CR: 7465 case CMD_CLOSE_XRI_CN: 7466 case CMD_CLOSE_XRI_CX: 7467 break; 7468 default: 7469 goto iocb_busy; 7470 } 7471 7472 /* 7473 * For FCP commands, we must be in a state where we can process link 7474 * attention events. 7475 */ 7476 } else if (unlikely(pring->ringno == phba->sli.fcp_ring && 7477 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) { 7478 goto iocb_busy; 7479 } 7480 7481 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && 7482 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb))) 7483 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb); 7484 7485 if (iocb) 7486 lpfc_sli_update_ring(phba, pring); 7487 else 7488 lpfc_sli_update_full_ring(phba, pring); 7489 7490 if (!piocb) 7491 return IOCB_SUCCESS; 7492 7493 goto out_busy; 7494 7495 iocb_busy: 7496 pring->stats.iocb_cmd_delay++; 7497 7498 out_busy: 7499 7500 if (!(flag & SLI_IOCB_RET_IOCB)) { 7501 __lpfc_sli_ringtx_put(phba, pring, piocb); 7502 return IOCB_SUCCESS; 7503 } 7504 7505 return IOCB_BUSY; 7506 } 7507 7508 /** 7509 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl. 7510 * @phba: Pointer to HBA context object. 7511 * @piocb: Pointer to command iocb. 7512 * @sglq: Pointer to the scatter gather queue object. 7513 * 7514 * This routine converts the bpl or bde that is in the IOCB 7515 * to a sgl list for the sli4 hardware. The physical address 7516 * of the bpl/bde is converted back to a virtual address. 7517 * If the IOCB contains a BPL then the list of BDE's is 7518 * converted to sli4_sge's. If the IOCB contains a single 7519 * BDE then it is converted to a single sli_sge. 7520 * The IOCB is still in cpu endianess so the contents of 7521 * the bpl can be used without byte swapping. 7522 * 7523 * Returns valid XRI = Success, NO_XRI = Failure. 
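*
* Illustrative example (not driver code): for a CMD_GEN_REQUEST64_CR whose
* BPL holds two outbound BDEs of 0x100 and 0x80 bytes followed by one
* inbound (BUFF_TYPE_BDE_64I) reply BDE, the SGE offsets programmed below
* are 0 and 0x100 for the request entries, and the running offset is reset
* to 0 when the first reply entry is reached.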
7524 **/ 7525 static uint16_t 7526 lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, 7527 struct lpfc_sglq *sglq) 7528 { 7529 uint16_t xritag = NO_XRI; 7530 struct ulp_bde64 *bpl = NULL; 7531 struct ulp_bde64 bde; 7532 struct sli4_sge *sgl = NULL; 7533 struct lpfc_dmabuf *dmabuf; 7534 IOCB_t *icmd; 7535 int numBdes = 0; 7536 int i = 0; 7537 uint32_t offset = 0; /* accumulated offset in the sg request list */ 7538 int inbound = 0; /* number of sg reply entries inbound from firmware */ 7539 7540 if (!piocbq || !sglq) 7541 return xritag; 7542 7543 sgl = (struct sli4_sge *)sglq->sgl; 7544 icmd = &piocbq->iocb; 7545 if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX) 7546 return sglq->sli4_xritag; 7547 if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { 7548 numBdes = icmd->un.genreq64.bdl.bdeSize / 7549 sizeof(struct ulp_bde64); 7550 /* The addrHigh and addrLow fields within the IOCB 7551 * have not been byteswapped yet so there is no 7552 * need to swap them back. 7553 */ 7554 if (piocbq->context3) 7555 dmabuf = (struct lpfc_dmabuf *)piocbq->context3; 7556 else 7557 return xritag; 7558 7559 bpl = (struct ulp_bde64 *)dmabuf->virt; 7560 if (!bpl) 7561 return xritag; 7562 7563 for (i = 0; i < numBdes; i++) { 7564 /* Should already be byte swapped. */ 7565 sgl->addr_hi = bpl->addrHigh; 7566 sgl->addr_lo = bpl->addrLow; 7567 7568 sgl->word2 = le32_to_cpu(sgl->word2); 7569 if ((i+1) == numBdes) 7570 bf_set(lpfc_sli4_sge_last, sgl, 1); 7571 else 7572 bf_set(lpfc_sli4_sge_last, sgl, 0); 7573 /* swap the size field back to the cpu so we 7574 * can assign it to the sgl. 7575 */ 7576 bde.tus.w = le32_to_cpu(bpl->tus.w); 7577 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize); 7578 /* The offsets in the sgl need to be accumulated 7579 * separately for the request and reply lists. 7580 * The request is always first, the reply follows. 7581 */ 7582 if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) { 7583 /* add up the reply sg entries */ 7584 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I) 7585 inbound++; 7586 /* first inbound? reset the offset */ 7587 if (inbound == 1) 7588 offset = 0; 7589 bf_set(lpfc_sli4_sge_offset, sgl, offset); 7590 bf_set(lpfc_sli4_sge_type, sgl, 7591 LPFC_SGE_TYPE_DATA); 7592 offset += bde.tus.f.bdeSize; 7593 } 7594 sgl->word2 = cpu_to_le32(sgl->word2); 7595 bpl++; 7596 sgl++; 7597 } 7598 } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) { 7599 /* The addrHigh and addrLow fields of the BDE have not 7600 * been byteswapped yet so they need to be swapped 7601 * before putting them in the sgl. 7602 */ 7603 sgl->addr_hi = 7604 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh); 7605 sgl->addr_lo = 7606 cpu_to_le32(icmd->un.genreq64.bdl.addrLow); 7607 sgl->word2 = le32_to_cpu(sgl->word2); 7608 bf_set(lpfc_sli4_sge_last, sgl, 1); 7609 sgl->word2 = cpu_to_le32(sgl->word2); 7610 sgl->sge_len = 7611 cpu_to_le32(icmd->un.genreq64.bdl.bdeSize); 7612 } 7613 return sglq->sli4_xritag; 7614 } 7615 7616 /** 7617 * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution 7618 * @phba: Pointer to HBA context object. 7619 * 7620 * This routine performs a roundrobin SCSI command to SLI4 FCP WQ index 7621 * distribution. This is called by __lpfc_sli_issue_iocb_s4() with the hbalock 7622 * held. 7623 * 7624 * Return: index into SLI4 fast-path FCP queue index. 
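*
* For example (illustration only), with phba->cfg_fcp_wq_count set to 4 and
* phba->fcp_qidx starting at 0, successive calls return 1, 2, 3, 0, 1, ...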
7625 **/ 7626 static uint32_t 7627 lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba) 7628 { 7629 ++phba->fcp_qidx; 7630 if (phba->fcp_qidx >= phba->cfg_fcp_wq_count) 7631 phba->fcp_qidx = 0; 7632 7633 return phba->fcp_qidx; 7634 } 7635 7636 /** 7637 * lpfc_sli_iocb2wqe - Convert the IOCB to a work queue entry. 7638 * @phba: Pointer to HBA context object. 7639 * @piocb: Pointer to command iocb. 7640 * @wqe: Pointer to the work queue entry. 7641 * 7642 * This routine converts the iocb command to its Work Queue Entry 7643 * equivalent. The wqe pointer should not have any fields set when 7644 * this routine is called because it will memcpy over them. 7645 * This routine does not set the CQ_ID or the WQEC bits in the 7646 * wqe. 7647 * 7648 * Returns: 0 = Success, IOCB_ERROR = Failure. 7649 **/ 7650 static int 7651 lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, 7652 union lpfc_wqe *wqe) 7653 { 7654 uint32_t xmit_len = 0, total_len = 0; 7655 uint8_t ct = 0; 7656 uint32_t fip; 7657 uint32_t abort_tag; 7658 uint8_t command_type = ELS_COMMAND_NON_FIP; 7659 uint8_t cmnd; 7660 uint16_t xritag; 7661 uint16_t abrt_iotag; 7662 struct lpfc_iocbq *abrtiocbq; 7663 struct ulp_bde64 *bpl = NULL; 7664 uint32_t els_id = LPFC_ELS_ID_DEFAULT; 7665 int numBdes, i; 7666 struct ulp_bde64 bde; 7667 struct lpfc_nodelist *ndlp; 7668 uint32_t *pcmd; 7669 uint32_t if_type; 7670 7671 fip = phba->hba_flag & HBA_FIP_SUPPORT; 7672 /* The fcp commands will set command type */ 7673 if (iocbq->iocb_flag & LPFC_IO_FCP) 7674 command_type = FCP_COMMAND; 7675 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)) 7676 command_type = ELS_COMMAND_FIP; 7677 else 7678 command_type = ELS_COMMAND_NON_FIP; 7679 7680 /* Some of the fields are in the right position already */ 7681 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe)); 7682 abort_tag = (uint32_t) iocbq->iotag; 7683 xritag = iocbq->sli4_xritag; 7684 wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */ 7685 /* words0-2 bpl convert bde */ 7686 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { 7687 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / 7688 sizeof(struct ulp_bde64); 7689 bpl = (struct ulp_bde64 *) 7690 ((struct lpfc_dmabuf *)iocbq->context3)->virt; 7691 if (!bpl) 7692 return IOCB_ERROR; 7693 7694 /* Should already be byte swapped. */ 7695 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh); 7696 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow); 7697 /* swap the size field back to the cpu so we 7698 * can assign it to the sgl. 
7699 */ 7700 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w); 7701 xmit_len = wqe->generic.bde.tus.f.bdeSize; 7702 total_len = 0; 7703 for (i = 0; i < numBdes; i++) { 7704 bde.tus.w = le32_to_cpu(bpl[i].tus.w); 7705 total_len += bde.tus.f.bdeSize; 7706 } 7707 } else 7708 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize; 7709 7710 iocbq->iocb.ulpIoTag = iocbq->iotag; 7711 cmnd = iocbq->iocb.ulpCommand; 7712 7713 switch (iocbq->iocb.ulpCommand) { 7714 case CMD_ELS_REQUEST64_CR: 7715 ndlp = (struct lpfc_nodelist *)iocbq->context1; 7716 if (!iocbq->iocb.ulpLe) { 7717 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 7718 "2007 Only Limited Edition cmd Format" 7719 " supported 0x%x\n", 7720 iocbq->iocb.ulpCommand); 7721 return IOCB_ERROR; 7722 } 7723 7724 wqe->els_req.payload_len = xmit_len; 7725 /* Els_reguest64 has a TMO */ 7726 bf_set(wqe_tmo, &wqe->els_req.wqe_com, 7727 iocbq->iocb.ulpTimeout); 7728 /* Need a VF for word 4 set the vf bit*/ 7729 bf_set(els_req64_vf, &wqe->els_req, 0); 7730 /* And a VFID for word 12 */ 7731 bf_set(els_req64_vfid, &wqe->els_req, 0); 7732 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); 7733 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 7734 iocbq->iocb.ulpContext); 7735 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct); 7736 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0); 7737 /* CCP CCPE PV PRI in word10 were set in the memcpy */ 7738 if (command_type == ELS_COMMAND_FIP) 7739 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK) 7740 >> LPFC_FIP_ELS_ID_SHIFT); 7741 pcmd = (uint32_t *) (((struct lpfc_dmabuf *) 7742 iocbq->context2)->virt); 7743 if_type = bf_get(lpfc_sli_intf_if_type, 7744 &phba->sli4_hba.sli_intf); 7745 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) { 7746 if (pcmd && (*pcmd == ELS_CMD_FLOGI || 7747 *pcmd == ELS_CMD_SCR || 7748 *pcmd == ELS_CMD_FDISC || 7749 *pcmd == ELS_CMD_LOGO || 7750 *pcmd == ELS_CMD_PLOGI)) { 7751 bf_set(els_req64_sp, &wqe->els_req, 1); 7752 bf_set(els_req64_sid, &wqe->els_req, 7753 iocbq->vport->fc_myDID); 7754 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1); 7755 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 7756 phba->vpi_ids[phba->pport->vpi]); 7757 } else if (pcmd && iocbq->context1) { 7758 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0); 7759 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 7760 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 7761 } 7762 } 7763 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com, 7764 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 7765 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id); 7766 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1); 7767 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ); 7768 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1); 7769 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE); 7770 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0); 7771 break; 7772 case CMD_XMIT_SEQUENCE64_CX: 7773 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, 7774 iocbq->iocb.un.ulpWord[3]); 7775 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, 7776 iocbq->iocb.unsli3.rcvsli3.ox_id); 7777 /* The entire sequence is transmitted for this IOCB */ 7778 xmit_len = total_len; 7779 cmnd = CMD_XMIT_SEQUENCE64_CR; 7780 if (phba->link_flag & LS_LOOPBACK_MODE) 7781 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1); 7782 case CMD_XMIT_SEQUENCE64_CR: 7783 /* word3 iocb=io_tag32 wqe=reserved */ 7784 wqe->xmit_sequence.rsvd3 = 0; 7785 /* word4 relative_offset memcpy */ 7786 /* word5 r_ctl/df_ctl memcpy */ 7787 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0); 7788 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1); 7789 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, 7790 
LPFC_WQE_IOD_WRITE); 7791 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com, 7792 LPFC_WQE_LENLOC_WORD12); 7793 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0); 7794 wqe->xmit_sequence.xmit_len = xmit_len; 7795 command_type = OTHER_COMMAND; 7796 break; 7797 case CMD_XMIT_BCAST64_CN: 7798 /* word3 iocb=iotag32 wqe=seq_payload_len */ 7799 wqe->xmit_bcast64.seq_payload_len = xmit_len; 7800 /* word4 iocb=rsvd wqe=rsvd */ 7801 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */ 7802 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */ 7803 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com, 7804 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 7805 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1); 7806 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE); 7807 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com, 7808 LPFC_WQE_LENLOC_WORD3); 7809 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0); 7810 break; 7811 case CMD_FCP_IWRITE64_CR: 7812 command_type = FCP_COMMAND_DATA_OUT; 7813 /* word3 iocb=iotag wqe=payload_offset_len */ 7814 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ 7815 wqe->fcp_iwrite.payload_offset_len = 7816 xmit_len + sizeof(struct fcp_rsp); 7817 /* word4 iocb=parameter wqe=total_xfer_length memcpy */ 7818 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */ 7819 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com, 7820 iocbq->iocb.ulpFCP2Rcvy); 7821 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS); 7822 /* Always open the exchange */ 7823 bf_set(wqe_xc, &wqe->fcp_iwrite.wqe_com, 0); 7824 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE); 7825 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, 7826 LPFC_WQE_LENLOC_WORD4); 7827 bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0); 7828 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU); 7829 if (iocbq->iocb_flag & LPFC_IO_DIF) { 7830 iocbq->iocb_flag &= ~LPFC_IO_DIF; 7831 bf_set(wqe_dif, &wqe->generic.wqe_com, 1); 7832 } 7833 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1); 7834 break; 7835 case CMD_FCP_IREAD64_CR: 7836 /* word3 iocb=iotag wqe=payload_offset_len */ 7837 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ 7838 wqe->fcp_iread.payload_offset_len = 7839 xmit_len + sizeof(struct fcp_rsp); 7840 /* word4 iocb=parameter wqe=total_xfer_length memcpy */ 7841 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */ 7842 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com, 7843 iocbq->iocb.ulpFCP2Rcvy); 7844 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS); 7845 /* Always open the exchange */ 7846 bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0); 7847 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ); 7848 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, 7849 LPFC_WQE_LENLOC_WORD4); 7850 bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0); 7851 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU); 7852 if (iocbq->iocb_flag & LPFC_IO_DIF) { 7853 iocbq->iocb_flag &= ~LPFC_IO_DIF; 7854 bf_set(wqe_dif, &wqe->generic.wqe_com, 1); 7855 } 7856 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1); 7857 break; 7858 case CMD_FCP_ICMND64_CR: 7859 /* word3 iocb=IO_TAG wqe=reserved */ 7860 wqe->fcp_icmd.rsrvd3 = 0; 7861 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0); 7862 /* Always open the exchange */ 7863 bf_set(wqe_xc, &wqe->fcp_icmd.wqe_com, 0); 7864 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1); 7865 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE); 7866 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1); 7867 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, 7868 LPFC_WQE_LENLOC_NONE); 7869 
bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0); 7870 break; 7871 case CMD_GEN_REQUEST64_CR: 7872 /* For this command calculate the xmit length of the 7873 * request bde. 7874 */ 7875 xmit_len = 0; 7876 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / 7877 sizeof(struct ulp_bde64); 7878 for (i = 0; i < numBdes; i++) { 7879 bde.tus.w = le32_to_cpu(bpl[i].tus.w); 7880 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64) 7881 break; 7882 xmit_len += bde.tus.f.bdeSize; 7883 } 7884 /* word3 iocb=IO_TAG wqe=request_payload_len */ 7885 wqe->gen_req.request_payload_len = xmit_len; 7886 /* word4 iocb=parameter wqe=relative_offset memcpy */ 7887 /* word5 [rctl, type, df_ctl, la] copied in memcpy */ 7888 /* word6 context tag copied in memcpy */ 7889 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) { 7890 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); 7891 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 7892 "2015 Invalid CT %x command 0x%x\n", 7893 ct, iocbq->iocb.ulpCommand); 7894 return IOCB_ERROR; 7895 } 7896 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0); 7897 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout); 7898 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU); 7899 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1); 7900 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ); 7901 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1); 7902 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE); 7903 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0); 7904 command_type = OTHER_COMMAND; 7905 break; 7906 case CMD_XMIT_ELS_RSP64_CX: 7907 ndlp = (struct lpfc_nodelist *)iocbq->context1; 7908 /* words0-2 BDE memcpy */ 7909 /* word3 iocb=iotag32 wqe=response_payload_len */ 7910 wqe->xmit_els_rsp.response_payload_len = xmit_len; 7911 /* word4 iocb=did wge=rsvd. */ 7912 wqe->xmit_els_rsp.rsvd4 = 0; 7913 /* word5 iocb=rsvd wge=did */ 7914 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest, 7915 iocbq->iocb.un.elsreq64.remoteID); 7916 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 7917 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 7918 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU); 7919 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 7920 iocbq->iocb.unsli3.rcvsli3.ox_id); 7921 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l) 7922 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 7923 phba->vpi_ids[iocbq->vport->vpi]); 7924 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1); 7925 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE); 7926 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1); 7927 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com, 7928 LPFC_WQE_LENLOC_WORD3); 7929 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0); 7930 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp, 7931 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 7932 pcmd = (uint32_t *) (((struct lpfc_dmabuf *) 7933 iocbq->context2)->virt); 7934 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 7935 bf_set(els_req64_sp, &wqe->els_req, 1); 7936 bf_set(els_req64_sid, &wqe->els_req, 7937 iocbq->vport->fc_myDID); 7938 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1); 7939 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 7940 phba->vpi_ids[phba->pport->vpi]); 7941 } 7942 command_type = OTHER_COMMAND; 7943 break; 7944 case CMD_CLOSE_XRI_CN: 7945 case CMD_ABORT_XRI_CN: 7946 case CMD_ABORT_XRI_CX: 7947 /* words 0-2 memcpy should be 0 rserved */ 7948 /* port will send abts */ 7949 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag; 7950 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) { 7951 abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag]; 7952 fip 
= abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK; 7953 } else 7954 fip = 0; 7955 7956 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip) 7957 /* 7958 * The link is down, or the command was ELS_FIP 7959 * so the fw does not need to send abts 7960 * on the wire. 7961 */ 7962 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1); 7963 else 7964 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0); 7965 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG); 7966 /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */ 7967 wqe->abort_cmd.rsrvd5 = 0; 7968 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com, 7969 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 7970 abort_tag = iocbq->iocb.un.acxri.abortIoTag; 7971 /* 7972 * The abort handler will send us CMD_ABORT_XRI_CN or 7973 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX 7974 */ 7975 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX); 7976 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1); 7977 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com, 7978 LPFC_WQE_LENLOC_NONE); 7979 cmnd = CMD_ABORT_XRI_CX; 7980 command_type = OTHER_COMMAND; 7981 xritag = 0; 7982 break; 7983 case CMD_XMIT_BLS_RSP64_CX: 7984 ndlp = (struct lpfc_nodelist *)iocbq->context1; 7985 /* As BLS ABTS RSP WQE is very different from other WQEs, 7986 * we re-construct this WQE here based on information in 7987 * iocbq from scratch. 7988 */ 7989 memset(wqe, 0, sizeof(union lpfc_wqe)); 7990 /* OX_ID is invariable to who sent ABTS to CT exchange */ 7991 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp, 7992 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp)); 7993 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) == 7994 LPFC_ABTS_UNSOL_INT) { 7995 /* ABTS sent by initiator to CT exchange, the 7996 * RX_ID field will be filled with the newly 7997 * allocated responder XRI. 7998 */ 7999 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp, 8000 iocbq->sli4_xritag); 8001 } else { 8002 /* ABTS sent by responder to CT exchange, the 8003 * RX_ID field will be filled with the responder 8004 * RX_ID from ABTS. 8005 */ 8006 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp, 8007 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp)); 8008 } 8009 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff); 8010 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1); 8011 8012 /* Use CT=VPI */ 8013 bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest, 8014 ndlp->nlp_DID); 8015 bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp, 8016 iocbq->iocb.ulpContext); 8017 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1); 8018 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com, 8019 phba->vpi_ids[phba->pport->vpi]); 8020 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1); 8021 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com, 8022 LPFC_WQE_LENLOC_NONE); 8023 /* Overwrite the pre-set comnd type with OTHER_COMMAND */ 8024 command_type = OTHER_COMMAND; 8025 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) { 8026 bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp, 8027 bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp)); 8028 bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp, 8029 bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp)); 8030 bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp, 8031 bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp)); 8032 } 8033 8034 break; 8035 case CMD_XRI_ABORTED_CX: 8036 case CMD_CREATE_XRI_CR: /* Do we expect to use this? 
*/ 8037 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */ 8038 case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */ 8039 case CMD_FCP_TRSP64_CX: /* Target mode rcv */ 8040 case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */ 8041 default: 8042 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8043 "2014 Invalid command 0x%x\n", 8044 iocbq->iocb.ulpCommand); 8045 return IOCB_ERROR; 8046 break; 8047 } 8048 8049 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag); 8050 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag); 8051 wqe->generic.wqe_com.abort_tag = abort_tag; 8052 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type); 8053 bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd); 8054 bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass); 8055 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); 8056 return 0; 8057 } 8058 8059 /** 8060 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb 8061 * @phba: Pointer to HBA context object. 8062 * @ring_number: SLI ring number to issue iocb on. 8063 * @piocb: Pointer to command iocb. 8064 * @flag: Flag indicating if this command can be put into txq. 8065 * 8066 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue 8067 * an iocb command to an HBA with SLI-4 interface spec. 8068 * 8069 * This function is called with hbalock held. The function will return success 8070 * after it successfully submit the iocb to firmware or after adding to the 8071 * txq. 8072 **/ 8073 static int 8074 __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, 8075 struct lpfc_iocbq *piocb, uint32_t flag) 8076 { 8077 struct lpfc_sglq *sglq; 8078 union lpfc_wqe wqe; 8079 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number]; 8080 8081 if (piocb->sli4_xritag == NO_XRI) { 8082 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || 8083 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN) 8084 sglq = NULL; 8085 else { 8086 if (pring->txq_cnt) { 8087 if (!(flag & SLI_IOCB_RET_IOCB)) { 8088 __lpfc_sli_ringtx_put(phba, 8089 pring, piocb); 8090 return IOCB_SUCCESS; 8091 } else { 8092 return IOCB_BUSY; 8093 } 8094 } else { 8095 sglq = __lpfc_sli_get_sglq(phba, piocb); 8096 if (!sglq) { 8097 if (!(flag & SLI_IOCB_RET_IOCB)) { 8098 __lpfc_sli_ringtx_put(phba, 8099 pring, 8100 piocb); 8101 return IOCB_SUCCESS; 8102 } else 8103 return IOCB_BUSY; 8104 } 8105 } 8106 } 8107 } else if (piocb->iocb_flag & LPFC_IO_FCP) { 8108 /* These IO's already have an XRI and a mapped sgl. */ 8109 sglq = NULL; 8110 } else { 8111 /* 8112 * This is a continuation of a commandi,(CX) so this 8113 * sglq is on the active list 8114 */ 8115 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_xritag); 8116 if (!sglq) 8117 return IOCB_ERROR; 8118 } 8119 8120 if (sglq) { 8121 piocb->sli4_lxritag = sglq->sli4_lxritag; 8122 piocb->sli4_xritag = sglq->sli4_xritag; 8123 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq)) 8124 return IOCB_ERROR; 8125 } 8126 8127 if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe)) 8128 return IOCB_ERROR; 8129 8130 if ((piocb->iocb_flag & LPFC_IO_FCP) || 8131 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) { 8132 /* 8133 * For FCP command IOCB, get a new WQ index to distribute 8134 * WQE across the WQsr. On the other hand, for abort IOCB, 8135 * it carries the same WQ index to the original command 8136 * IOCB. 
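* (In other words, piocb->fcp_wqidx is expected to have been copied from
* the command being aborted, so the abort WQE is posted to the same
* fcp_wq[] entry that carried the original command.)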
8137 */ 8138 if (piocb->iocb_flag & LPFC_IO_FCP) 8139 piocb->fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba); 8140 if (unlikely(!phba->sli4_hba.fcp_wq)) 8141 return IOCB_ERROR; 8142 if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx], 8143 &wqe)) 8144 return IOCB_ERROR; 8145 } else { 8146 if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe)) 8147 return IOCB_ERROR; 8148 } 8149 lpfc_sli_ringtxcmpl_put(phba, pring, piocb); 8150 8151 return 0; 8152 } 8153 8154 /** 8155 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb 8156 * 8157 * This routine wraps the actual lockless version for issusing IOCB function 8158 * pointer from the lpfc_hba struct. 8159 * 8160 * Return codes: 8161 * IOCB_ERROR - Error 8162 * IOCB_SUCCESS - Success 8163 * IOCB_BUSY - Busy 8164 **/ 8165 int 8166 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, 8167 struct lpfc_iocbq *piocb, uint32_t flag) 8168 { 8169 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); 8170 } 8171 8172 /** 8173 * lpfc_sli_api_table_setup - Set up sli api function jump table 8174 * @phba: The hba struct for which this call is being executed. 8175 * @dev_grp: The HBA PCI-Device group number. 8176 * 8177 * This routine sets up the SLI interface API function jump table in @phba 8178 * struct. 8179 * Returns: 0 - success, -ENODEV - failure. 8180 **/ 8181 int 8182 lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 8183 { 8184 8185 switch (dev_grp) { 8186 case LPFC_PCI_DEV_LP: 8187 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3; 8188 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3; 8189 break; 8190 case LPFC_PCI_DEV_OC: 8191 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4; 8192 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4; 8193 break; 8194 default: 8195 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8196 "1419 Invalid HBA PCI-device group: 0x%x\n", 8197 dev_grp); 8198 return -ENODEV; 8199 break; 8200 } 8201 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq; 8202 return 0; 8203 } 8204 8205 /** 8206 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb 8207 * @phba: Pointer to HBA context object. 8208 * @pring: Pointer to driver SLI ring object. 8209 * @piocb: Pointer to command iocb. 8210 * @flag: Flag indicating if this command can be put into txq. 8211 * 8212 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb 8213 * function. This function gets the hbalock and calls 8214 * __lpfc_sli_issue_iocb function and will return the error returned 8215 * by __lpfc_sli_issue_iocb function. This wrapper is used by 8216 * functions which do not hold hbalock. 8217 **/ 8218 int 8219 lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, 8220 struct lpfc_iocbq *piocb, uint32_t flag) 8221 { 8222 unsigned long iflags; 8223 int rc; 8224 8225 spin_lock_irqsave(&phba->hbalock, iflags); 8226 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); 8227 spin_unlock_irqrestore(&phba->hbalock, iflags); 8228 8229 return rc; 8230 } 8231 8232 /** 8233 * lpfc_extra_ring_setup - Extra ring setup function 8234 * @phba: Pointer to HBA context object. 8235 * 8236 * This function is called while driver attaches with the 8237 * HBA to setup the extra ring. The extra ring is used 8238 * only when driver needs to support target mode functionality 8239 * or IP over FC functionalities. 8240 * 8241 * This function is called with no lock held. 
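*
* Sketch of the accounting done below: the FCP ring gives up
* SLI2_IOCB_CMD_R1XTRA_ENTRIES + SLI2_IOCB_CMD_R3XTRA_ENTRIES command
* entries (and the matching response entries) and the extra ring gains the
* same amounts, so the total SLIM footprint computed in lpfc_sli_setup()
* is unchanged. This routine runs only when cfg_multi_ring_support is 2.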
8242 **/ 8243 static int 8244 lpfc_extra_ring_setup( struct lpfc_hba *phba) 8245 { 8246 struct lpfc_sli *psli; 8247 struct lpfc_sli_ring *pring; 8248 8249 psli = &phba->sli; 8250 8251 /* Adjust cmd/rsp ring iocb entries more evenly */ 8252 8253 /* Take some away from the FCP ring */ 8254 pring = &psli->ring[psli->fcp_ring]; 8255 pring->numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES; 8256 pring->numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES; 8257 pring->numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES; 8258 pring->numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES; 8259 8260 /* and give them to the extra ring */ 8261 pring = &psli->ring[psli->extra_ring]; 8262 8263 pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES; 8264 pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES; 8265 pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES; 8266 pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES; 8267 8268 /* Setup default profile for this ring */ 8269 pring->iotag_max = 4096; 8270 pring->num_mask = 1; 8271 pring->prt[0].profile = 0; /* Mask 0 */ 8272 pring->prt[0].rctl = phba->cfg_multi_ring_rctl; 8273 pring->prt[0].type = phba->cfg_multi_ring_type; 8274 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL; 8275 return 0; 8276 } 8277 8278 /* lpfc_sli_abts_recover_port - Recover a port that failed an ABTS. 8279 * @vport: pointer to virtual port object. 8280 * @ndlp: nodelist pointer for the impacted rport. 8281 * 8282 * The driver calls this routine in response to a XRI ABORT CQE 8283 * event from the port. In this event, the driver is required to 8284 * recover its login to the rport even though its login may be valid 8285 * from the driver's perspective. The failed ABTS notice from the 8286 * port indicates the rport is not responding. 8287 */ 8288 static void 8289 lpfc_sli_abts_recover_port(struct lpfc_vport *vport, 8290 struct lpfc_nodelist *ndlp) 8291 { 8292 struct Scsi_Host *shost; 8293 struct lpfc_hba *phba; 8294 unsigned long flags = 0; 8295 8296 shost = lpfc_shost_from_vport(vport); 8297 phba = vport->phba; 8298 if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) { 8299 lpfc_printf_log(phba, KERN_INFO, 8300 LOG_SLI, "3093 No rport recovery needed. " 8301 "rport in state 0x%x\n", 8302 ndlp->nlp_state); 8303 return; 8304 } 8305 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8306 "3094 Start rport recovery on shost id 0x%x " 8307 "fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x " 8308 "flags 0x%x\n", 8309 shost->host_no, ndlp->nlp_DID, 8310 vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state, 8311 ndlp->nlp_flag); 8312 /* 8313 * The rport is not responding. Don't attempt ADISC recovery. 8314 * Remove the FCP-2 flag to force a PLOGI. 8315 */ 8316 spin_lock_irqsave(shost->host_lock, flags); 8317 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; 8318 spin_unlock_irqrestore(shost->host_lock, flags); 8319 lpfc_disc_state_machine(vport, ndlp, NULL, 8320 NLP_EVT_DEVICE_RECOVERY); 8321 lpfc_cancel_retry_delay_tmo(vport, ndlp); 8322 spin_lock_irqsave(shost->host_lock, flags); 8323 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 8324 spin_unlock_irqrestore(shost->host_lock, flags); 8325 lpfc_disc_start(vport); 8326 } 8327 8328 /* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port. 8329 * @phba: Pointer to HBA context object. 8330 * @iocbq: Pointer to iocb object. 8331 * 8332 * The async_event handler calls this routine when it receives 8333 * an ASYNC_STATUS_CN event from the port. The port generates 8334 * this event when an Abort Sequence request to an rport fails 8335 * twice in succession. The abort could be originated by the 8336 * driver or by the port. 
The ABTS could have been for an ELS 8337 * or FCP IO. The port only generates this event when an ABTS 8338 * fails to complete after one retry. 8339 */ 8340 static void 8341 lpfc_sli_abts_err_handler(struct lpfc_hba *phba, 8342 struct lpfc_iocbq *iocbq) 8343 { 8344 struct lpfc_nodelist *ndlp = NULL; 8345 uint16_t rpi = 0, vpi = 0; 8346 struct lpfc_vport *vport = NULL; 8347 8348 /* The rpi in the ulpContext is vport-sensitive. */ 8349 vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag; 8350 rpi = iocbq->iocb.ulpContext; 8351 8352 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 8353 "3092 Port generated ABTS async event " 8354 "on vpi %d rpi %d status 0x%x\n", 8355 vpi, rpi, iocbq->iocb.ulpStatus); 8356 8357 vport = lpfc_find_vport_by_vpid(phba, vpi); 8358 if (!vport) 8359 goto err_exit; 8360 ndlp = lpfc_findnode_rpi(vport, rpi); 8361 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) 8362 goto err_exit; 8363 8364 if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT) 8365 lpfc_sli_abts_recover_port(vport, ndlp); 8366 return; 8367 8368 err_exit: 8369 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 8370 "3095 Event Context not found, no " 8371 "action on vpi %d rpi %d status 0x%x, reason 0x%x\n", 8372 iocbq->iocb.ulpContext, iocbq->iocb.ulpStatus, 8373 vpi, rpi); 8374 } 8375 8376 /* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port. 8377 * @phba: pointer to HBA context object. 8378 * @ndlp: nodelist pointer for the impacted rport. 8379 * @axri: pointer to the wcqe containing the failed exchange. 8380 * 8381 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the 8382 * port. The port generates this event when an abort exchange request to an 8383 * rport fails twice in succession with no reply. The abort could be originated 8384 * by the driver or by the port. The ABTS could have been for an ELS or FCP IO. 8385 */ 8386 void 8387 lpfc_sli4_abts_err_handler(struct lpfc_hba *phba, 8388 struct lpfc_nodelist *ndlp, 8389 struct sli4_wcqe_xri_aborted *axri) 8390 { 8391 struct lpfc_vport *vport; 8392 uint32_t ext_status = 0; 8393 8394 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { 8395 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 8396 "3115 Node Context not found, driver " 8397 "ignoring abts err event\n"); 8398 return; 8399 } 8400 8401 vport = ndlp->vport; 8402 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 8403 "3116 Port generated FCP XRI ABORT event on " 8404 "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n", 8405 ndlp->vport->vpi, ndlp->nlp_rpi, 8406 bf_get(lpfc_wcqe_xa_xri, axri), 8407 bf_get(lpfc_wcqe_xa_status, axri), 8408 axri->parameter); 8409 8410 /* 8411 * Catch the ABTS protocol failure case. Older OCe FW releases returned 8412 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and 8413 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT. 8414 */ 8415 ext_status = axri->parameter & WCQE_PARAM_MASK; 8416 if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) && 8417 ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0))) 8418 lpfc_sli_abts_recover_port(vport, ndlp); 8419 } 8420 8421 /** 8422 * lpfc_sli_async_event_handler - ASYNC iocb handler function 8423 * @phba: Pointer to HBA context object. 8424 * @pring: Pointer to driver SLI ring object. 8425 * @iocbq: Pointer to iocb object. 8426 * 8427 * This function is called by the slow ring event handler 8428 * function when there is an ASYNC event iocb in the ring. 8429 * This function is called with no lock held. 8430 * Currently this function handles only temperature related 8431 * ASYNC events. 
The function decodes the temperature sensor 8432 * event message and posts events for the management applications. 8433 **/ 8434 static void 8435 lpfc_sli_async_event_handler(struct lpfc_hba * phba, 8436 struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq) 8437 { 8438 IOCB_t *icmd; 8439 uint16_t evt_code; 8440 struct temp_event temp_event_data; 8441 struct Scsi_Host *shost; 8442 uint32_t *iocb_w; 8443 8444 icmd = &iocbq->iocb; 8445 evt_code = icmd->un.asyncstat.evt_code; 8446 8447 switch (evt_code) { 8448 case ASYNC_TEMP_WARN: 8449 case ASYNC_TEMP_SAFE: 8450 temp_event_data.data = (uint32_t) icmd->ulpContext; 8451 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 8452 if (evt_code == ASYNC_TEMP_WARN) { 8453 temp_event_data.event_code = LPFC_THRESHOLD_TEMP; 8454 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP, 8455 "0347 Adapter is very hot, please take " 8456 "corrective action. temperature : %d Celsius\n", 8457 (uint32_t) icmd->ulpContext); 8458 } else { 8459 temp_event_data.event_code = LPFC_NORMAL_TEMP; 8460 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP, 8461 "0340 Adapter temperature is OK now. " 8462 "temperature : %d Celsius\n", 8463 (uint32_t) icmd->ulpContext); 8464 } 8465 8466 /* Send temperature change event to applications */ 8467 shost = lpfc_shost_from_vport(phba->pport); 8468 fc_host_post_vendor_event(shost, fc_get_event_number(), 8469 sizeof(temp_event_data), (char *) &temp_event_data, 8470 LPFC_NL_VENDOR_ID); 8471 break; 8472 case ASYNC_STATUS_CN: 8473 lpfc_sli_abts_err_handler(phba, iocbq); 8474 break; 8475 default: 8476 iocb_w = (uint32_t *) icmd; 8477 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8478 "0346 Ring %d handler: unexpected ASYNC_STATUS" 8479 " evt_code 0x%x\n" 8480 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n" 8481 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n" 8482 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n" 8483 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n", 8484 pring->ringno, icmd->un.asyncstat.evt_code, 8485 iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3], 8486 iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7], 8487 iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11], 8488 iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]); 8489 8490 break; 8491 } 8492 } 8493 8494 8495 /** 8496 * lpfc_sli_setup - SLI ring setup function 8497 * @phba: Pointer to HBA context object. 8498 * 8499 * lpfc_sli_setup sets up rings of the SLI interface with 8500 * number of iocbs per ring and iotags. This function is 8501 * called while driver attach to the HBA and before the 8502 * interrupts are enabled. So there is no need for locking. 8503 * 8504 * This function always returns 0. 
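*
* As configured below, ring 0 (LPFC_FCP_RING) carries FCP traffic and also
* absorbs the R1/R3 extra entries, ring 1 (LPFC_EXTRA_RING) is the optional
* extra ring later adjusted by lpfc_extra_ring_setup(), and ring 2
* (LPFC_ELS_RING) carries ELS/CT traffic with unsolicited-event masks for
* ELS, CT and BLS ABTS frames.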
8505 **/ 8506 int 8507 lpfc_sli_setup(struct lpfc_hba *phba) 8508 { 8509 int i, totiocbsize = 0; 8510 struct lpfc_sli *psli = &phba->sli; 8511 struct lpfc_sli_ring *pring; 8512 8513 psli->num_rings = MAX_CONFIGURED_RINGS; 8514 psli->sli_flag = 0; 8515 psli->fcp_ring = LPFC_FCP_RING; 8516 psli->next_ring = LPFC_FCP_NEXT_RING; 8517 psli->extra_ring = LPFC_EXTRA_RING; 8518 8519 psli->iocbq_lookup = NULL; 8520 psli->iocbq_lookup_len = 0; 8521 psli->last_iotag = 0; 8522 8523 for (i = 0; i < psli->num_rings; i++) { 8524 pring = &psli->ring[i]; 8525 switch (i) { 8526 case LPFC_FCP_RING: /* ring 0 - FCP */ 8527 /* numCiocb and numRiocb are used in config_port */ 8528 pring->numCiocb = SLI2_IOCB_CMD_R0_ENTRIES; 8529 pring->numRiocb = SLI2_IOCB_RSP_R0_ENTRIES; 8530 pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES; 8531 pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES; 8532 pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES; 8533 pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES; 8534 pring->sizeCiocb = (phba->sli_rev == 3) ? 8535 SLI3_IOCB_CMD_SIZE : 8536 SLI2_IOCB_CMD_SIZE; 8537 pring->sizeRiocb = (phba->sli_rev == 3) ? 8538 SLI3_IOCB_RSP_SIZE : 8539 SLI2_IOCB_RSP_SIZE; 8540 pring->iotag_ctr = 0; 8541 pring->iotag_max = 8542 (phba->cfg_hba_queue_depth * 2); 8543 pring->fast_iotag = pring->iotag_max; 8544 pring->num_mask = 0; 8545 break; 8546 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */ 8547 /* numCiocb and numRiocb are used in config_port */ 8548 pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES; 8549 pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES; 8550 pring->sizeCiocb = (phba->sli_rev == 3) ? 8551 SLI3_IOCB_CMD_SIZE : 8552 SLI2_IOCB_CMD_SIZE; 8553 pring->sizeRiocb = (phba->sli_rev == 3) ? 8554 SLI3_IOCB_RSP_SIZE : 8555 SLI2_IOCB_RSP_SIZE; 8556 pring->iotag_max = phba->cfg_hba_queue_depth; 8557 pring->num_mask = 0; 8558 break; 8559 case LPFC_ELS_RING: /* ring 2 - ELS / CT */ 8560 /* numCiocb and numRiocb are used in config_port */ 8561 pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES; 8562 pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES; 8563 pring->sizeCiocb = (phba->sli_rev == 3) ? 8564 SLI3_IOCB_CMD_SIZE : 8565 SLI2_IOCB_CMD_SIZE; 8566 pring->sizeRiocb = (phba->sli_rev == 3) ? 
8567 SLI3_IOCB_RSP_SIZE : 8568 SLI2_IOCB_RSP_SIZE; 8569 pring->fast_iotag = 0; 8570 pring->iotag_ctr = 0; 8571 pring->iotag_max = 4096; 8572 pring->lpfc_sli_rcv_async_status = 8573 lpfc_sli_async_event_handler; 8574 pring->num_mask = LPFC_MAX_RING_MASK; 8575 pring->prt[0].profile = 0; /* Mask 0 */ 8576 pring->prt[0].rctl = FC_RCTL_ELS_REQ; 8577 pring->prt[0].type = FC_TYPE_ELS; 8578 pring->prt[0].lpfc_sli_rcv_unsol_event = 8579 lpfc_els_unsol_event; 8580 pring->prt[1].profile = 0; /* Mask 1 */ 8581 pring->prt[1].rctl = FC_RCTL_ELS_REP; 8582 pring->prt[1].type = FC_TYPE_ELS; 8583 pring->prt[1].lpfc_sli_rcv_unsol_event = 8584 lpfc_els_unsol_event; 8585 pring->prt[2].profile = 0; /* Mask 2 */ 8586 /* NameServer Inquiry */ 8587 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL; 8588 /* NameServer */ 8589 pring->prt[2].type = FC_TYPE_CT; 8590 pring->prt[2].lpfc_sli_rcv_unsol_event = 8591 lpfc_ct_unsol_event; 8592 pring->prt[3].profile = 0; /* Mask 3 */ 8593 /* NameServer response */ 8594 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL; 8595 /* NameServer */ 8596 pring->prt[3].type = FC_TYPE_CT; 8597 pring->prt[3].lpfc_sli_rcv_unsol_event = 8598 lpfc_ct_unsol_event; 8599 /* abort unsolicited sequence */ 8600 pring->prt[4].profile = 0; /* Mask 4 */ 8601 pring->prt[4].rctl = FC_RCTL_BA_ABTS; 8602 pring->prt[4].type = FC_TYPE_BLS; 8603 pring->prt[4].lpfc_sli_rcv_unsol_event = 8604 lpfc_sli4_ct_abort_unsol_event; 8605 break; 8606 } 8607 totiocbsize += (pring->numCiocb * pring->sizeCiocb) + 8608 (pring->numRiocb * pring->sizeRiocb); 8609 } 8610 if (totiocbsize > MAX_SLIM_IOCB_SIZE) { 8611 /* Too many cmd / rsp ring entries in SLI2 SLIM */ 8612 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in " 8613 "SLI2 SLIM Data: x%x x%lx\n", 8614 phba->brd_no, totiocbsize, 8615 (unsigned long) MAX_SLIM_IOCB_SIZE); 8616 } 8617 if (phba->cfg_multi_ring_support == 2) 8618 lpfc_extra_ring_setup(phba); 8619 8620 return 0; 8621 } 8622 8623 /** 8624 * lpfc_sli_queue_setup - Queue initialization function 8625 * @phba: Pointer to HBA context object. 8626 * 8627 * lpfc_sli_queue_setup sets up mailbox queues and iocb queues for each 8628 * ring. This function also initializes ring indices of each ring. 8629 * This function is called during the initialization of the SLI 8630 * interface of an HBA. 8631 * This function is called with no lock held and always returns 8632 * 1. 8633 **/ 8634 int 8635 lpfc_sli_queue_setup(struct lpfc_hba *phba) 8636 { 8637 struct lpfc_sli *psli; 8638 struct lpfc_sli_ring *pring; 8639 int i; 8640 8641 psli = &phba->sli; 8642 spin_lock_irq(&phba->hbalock); 8643 INIT_LIST_HEAD(&psli->mboxq); 8644 INIT_LIST_HEAD(&psli->mboxq_cmpl); 8645 /* Initialize list headers for txq and txcmplq as double linked lists */ 8646 for (i = 0; i < psli->num_rings; i++) { 8647 pring = &psli->ring[i]; 8648 pring->ringno = i; 8649 pring->next_cmdidx = 0; 8650 pring->local_getidx = 0; 8651 pring->cmdidx = 0; 8652 INIT_LIST_HEAD(&pring->txq); 8653 INIT_LIST_HEAD(&pring->txcmplq); 8654 INIT_LIST_HEAD(&pring->iocb_continueq); 8655 INIT_LIST_HEAD(&pring->iocb_continue_saveq); 8656 INIT_LIST_HEAD(&pring->postbufq); 8657 } 8658 spin_unlock_irq(&phba->hbalock); 8659 return 1; 8660 } 8661 8662 /** 8663 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system 8664 * @phba: Pointer to HBA context object. 8665 * 8666 * This routine flushes the mailbox command subsystem. 
It will unconditionally 8667 * flush all the mailbox commands in the three possible stages in the mailbox 8668 * command sub-system: pending mailbox command queue; the outstanding mailbox 8669 * command; and completed mailbox command queue. It is caller's responsibility 8670 * to make sure that the driver is in the proper state to flush the mailbox 8671 * command sub-system. Namely, the posting of mailbox commands into the 8672 * pending mailbox command queue from the various clients must be stopped; 8673 * either the HBA is in a state that it will never works on the outstanding 8674 * mailbox command (such as in EEH or ERATT conditions) or the outstanding 8675 * mailbox command has been completed. 8676 **/ 8677 static void 8678 lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba) 8679 { 8680 LIST_HEAD(completions); 8681 struct lpfc_sli *psli = &phba->sli; 8682 LPFC_MBOXQ_t *pmb; 8683 unsigned long iflag; 8684 8685 /* Flush all the mailbox commands in the mbox system */ 8686 spin_lock_irqsave(&phba->hbalock, iflag); 8687 /* The pending mailbox command queue */ 8688 list_splice_init(&phba->sli.mboxq, &completions); 8689 /* The outstanding active mailbox command */ 8690 if (psli->mbox_active) { 8691 list_add_tail(&psli->mbox_active->list, &completions); 8692 psli->mbox_active = NULL; 8693 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8694 } 8695 /* The completed mailbox command queue */ 8696 list_splice_init(&phba->sli.mboxq_cmpl, &completions); 8697 spin_unlock_irqrestore(&phba->hbalock, iflag); 8698 8699 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */ 8700 while (!list_empty(&completions)) { 8701 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list); 8702 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED; 8703 if (pmb->mbox_cmpl) 8704 pmb->mbox_cmpl(phba, pmb); 8705 } 8706 } 8707 8708 /** 8709 * lpfc_sli_host_down - Vport cleanup function 8710 * @vport: Pointer to virtual port object. 8711 * 8712 * lpfc_sli_host_down is called to clean up the resources 8713 * associated with a vport before destroying virtual 8714 * port data structures. 8715 * This function does following operations: 8716 * - Free discovery resources associated with this virtual 8717 * port. 8718 * - Free iocbs associated with this virtual port in 8719 * the txq. 8720 * - Send abort for all iocb commands associated with this 8721 * vport in txcmplq. 8722 * 8723 * This function is called with no lock held and always returns 1. 8724 **/ 8725 int 8726 lpfc_sli_host_down(struct lpfc_vport *vport) 8727 { 8728 LIST_HEAD(completions); 8729 struct lpfc_hba *phba = vport->phba; 8730 struct lpfc_sli *psli = &phba->sli; 8731 struct lpfc_sli_ring *pring; 8732 struct lpfc_iocbq *iocb, *next_iocb; 8733 int i; 8734 unsigned long flags = 0; 8735 uint16_t prev_pring_flag; 8736 8737 lpfc_cleanup_discovery_resources(vport); 8738 8739 spin_lock_irqsave(&phba->hbalock, flags); 8740 for (i = 0; i < psli->num_rings; i++) { 8741 pring = &psli->ring[i]; 8742 prev_pring_flag = pring->flag; 8743 /* Only slow rings */ 8744 if (pring->ringno == LPFC_ELS_RING) { 8745 pring->flag |= LPFC_DEFERRED_RING_EVENT; 8746 /* Set the lpfc data pending flag */ 8747 set_bit(LPFC_DATA_READY, &phba->data_flags); 8748 } 8749 /* 8750 * Error everything on the txq since these iocbs have not been 8751 * given to the FW yet. 
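* Each such iocb is moved to the local completions list and completed
* below with IOSTAT_LOCAL_REJECT / IOERR_SLI_DOWN.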
8752 */ 8753 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { 8754 if (iocb->vport != vport) 8755 continue; 8756 list_move_tail(&iocb->list, &completions); 8757 pring->txq_cnt--; 8758 } 8759 8760 /* Next issue ABTS for everything on the txcmplq */ 8761 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, 8762 list) { 8763 if (iocb->vport != vport) 8764 continue; 8765 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 8766 } 8767 8768 pring->flag = prev_pring_flag; 8769 } 8770 8771 spin_unlock_irqrestore(&phba->hbalock, flags); 8772 8773 /* Cancel all the IOCBs from the completions list */ 8774 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 8775 IOERR_SLI_DOWN); 8776 return 1; 8777 } 8778 8779 /** 8780 * lpfc_sli_hba_down - Resource cleanup function for the HBA 8781 * @phba: Pointer to HBA context object. 8782 * 8783 * This function cleans up all iocb, buffers, mailbox commands 8784 * while shutting down the HBA. This function is called with no 8785 * lock held and always returns 1. 8786 * This function does the following to cleanup driver resources: 8787 * - Free discovery resources for each virtual port 8788 * - Cleanup any pending fabric iocbs 8789 * - Iterate through the iocb txq and free each entry 8790 * in the list. 8791 * - Free up any buffer posted to the HBA 8792 * - Free mailbox commands in the mailbox queue. 8793 **/ 8794 int 8795 lpfc_sli_hba_down(struct lpfc_hba *phba) 8796 { 8797 LIST_HEAD(completions); 8798 struct lpfc_sli *psli = &phba->sli; 8799 struct lpfc_sli_ring *pring; 8800 struct lpfc_dmabuf *buf_ptr; 8801 unsigned long flags = 0; 8802 int i; 8803 8804 /* Shutdown the mailbox command sub-system */ 8805 lpfc_sli_mbox_sys_shutdown(phba); 8806 8807 lpfc_hba_down_prep(phba); 8808 8809 lpfc_fabric_abort_hba(phba); 8810 8811 spin_lock_irqsave(&phba->hbalock, flags); 8812 for (i = 0; i < psli->num_rings; i++) { 8813 pring = &psli->ring[i]; 8814 /* Only slow rings */ 8815 if (pring->ringno == LPFC_ELS_RING) { 8816 pring->flag |= LPFC_DEFERRED_RING_EVENT; 8817 /* Set the lpfc data pending flag */ 8818 set_bit(LPFC_DATA_READY, &phba->data_flags); 8819 } 8820 8821 /* 8822 * Error everything on the txq since these iocbs have not been 8823 * given to the FW yet. 8824 */ 8825 list_splice_init(&pring->txq, &completions); 8826 pring->txq_cnt = 0; 8827 8828 } 8829 spin_unlock_irqrestore(&phba->hbalock, flags); 8830 8831 /* Cancel all the IOCBs from the completions list */ 8832 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 8833 IOERR_SLI_DOWN); 8834 8835 spin_lock_irqsave(&phba->hbalock, flags); 8836 list_splice_init(&phba->elsbuf, &completions); 8837 phba->elsbuf_cnt = 0; 8838 phba->elsbuf_prev_cnt = 0; 8839 spin_unlock_irqrestore(&phba->hbalock, flags); 8840 8841 while (!list_empty(&completions)) { 8842 list_remove_head(&completions, buf_ptr, 8843 struct lpfc_dmabuf, list); 8844 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 8845 kfree(buf_ptr); 8846 } 8847 8848 /* Return any active mbox cmds */ 8849 del_timer_sync(&psli->mbox_tmo); 8850 8851 spin_lock_irqsave(&phba->pport->work_port_lock, flags); 8852 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 8853 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags); 8854 8855 return 1; 8856 } 8857 8858 /** 8859 * lpfc_sli_pcimem_bcopy - SLI memory copy function 8860 * @srcp: Source memory pointer. 8861 * @destp: Destination memory pointer. 8862 * @cnt: Number of words required to be copied. 
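 *       (The copy loop below advances in sizeof(uint32_t) steps until it
 *       reaches @cnt, so callers in this file pass the length in bytes.)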
8863  *
8864  * This function is used for copying data between driver memory
8865  * and the SLI memory. This function also changes the endianness
8866  * of each word if native endianness is different from SLI
8867  * endianness. This function can be called with or without
8868  * lock.
8869  **/
8870 void
8871 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
8872 {
8873     uint32_t *src = srcp;
8874     uint32_t *dest = destp;
8875     uint32_t ldata;
8876     int i;
8877 
8878     for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
8879         ldata = *src;
8880         ldata = le32_to_cpu(ldata);
8881         *dest = ldata;
8882         src++;
8883         dest++;
8884     }
8885 }
8886 
8887 
8888 /**
8889  * lpfc_sli_bemem_bcopy - SLI memory copy function
8890  * @srcp: Source memory pointer.
8891  * @destp: Destination memory pointer.
8892  * @cnt: Number of words required to be copied.
8893  *
8894  * This function is used for copying data from a data structure
8895  * with big endian representation to local endianness.
8896  * This function can be called with or without lock.
8897  **/
8898 void
8899 lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
8900 {
8901     uint32_t *src = srcp;
8902     uint32_t *dest = destp;
8903     uint32_t ldata;
8904     int i;
8905 
8906     for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
8907         ldata = *src;
8908         ldata = be32_to_cpu(ldata);
8909         *dest = ldata;
8910         src++;
8911         dest++;
8912     }
8913 }
8914 
8915 /**
8916  * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
8917  * @phba: Pointer to HBA context object.
8918  * @pring: Pointer to driver SLI ring object.
8919  * @mp: Pointer to driver buffer object.
8920  *
8921  * This function is called with no lock held.
8922  * It always returns zero after adding the buffer to the postbufq
8923  * buffer list.
8924  **/
8925 int
8926 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
8927              struct lpfc_dmabuf *mp)
8928 {
8929     /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
8930        later */
8931     spin_lock_irq(&phba->hbalock);
8932     list_add_tail(&mp->list, &pring->postbufq);
8933     pring->postbufq_cnt++;
8934     spin_unlock_irq(&phba->hbalock);
8935     return 0;
8936 }
8937 
8938 /**
8939  * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
8940  * @phba: Pointer to HBA context object.
8941  *
8942  * When HBQ is enabled, buffers are searched based on tags. This function
8943  * allocates a tag for buffer posted using CMD_QUE_XRI64_CX iocb. The
8944  * tag is bit wise or-ed with QUE_BUFTAG_BIT to make sure that the tag
8945  * does not conflict with tags of buffer posted for unsolicited events.
8946  * The function returns the allocated tag. The function is called with
8947  * no locks held.
8948  **/
8949 uint32_t
8950 lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
8951 {
8952     spin_lock_irq(&phba->hbalock);
8953     phba->buffer_tag_count++;
8954     /*
8955      * Always set the QUE_BUFTAG_BIT to distinguish this tag from
8956      * a tag assigned by HBQ.
8957      */
8958     phba->buffer_tag_count |= QUE_BUFTAG_BIT;
8959     spin_unlock_irq(&phba->hbalock);
8960     return phba->buffer_tag_count;
8961 }
8962 
8963 /**
8964  * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
8965  * @phba: Pointer to HBA context object.
8966  * @pring: Pointer to driver SLI ring object.
8967  * @tag: Buffer tag.
8968  *
8969  * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
8970  * list. After the HBA DMAs data to these buffers, a CMD_IOCB_RET_XRI64_CX
8971  * iocb is posted to the response ring with the tag of the buffer.
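 * An illustrative pairing of the helpers above with this one (the real
 * callers live elsewhere in the driver) is:
 *
 *     mp->buffer_tag = lpfc_sli_get_buffer_tag(phba);
 *     lpfc_sli_ringpostbuf_put(phba, pring, mp);
 *     ...
 *     mp = lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
 *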
8972 * This function searches the pring->postbufq list using the tag 8973 * to find buffer associated with CMD_IOCB_RET_XRI64_CX 8974 * iocb. If the buffer is found then lpfc_dmabuf object of the 8975 * buffer is returned to the caller else NULL is returned. 8976 * This function is called with no lock held. 8977 **/ 8978 struct lpfc_dmabuf * 8979 lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 8980 uint32_t tag) 8981 { 8982 struct lpfc_dmabuf *mp, *next_mp; 8983 struct list_head *slp = &pring->postbufq; 8984 8985 /* Search postbufq, from the beginning, looking for a match on tag */ 8986 spin_lock_irq(&phba->hbalock); 8987 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 8988 if (mp->buffer_tag == tag) { 8989 list_del_init(&mp->list); 8990 pring->postbufq_cnt--; 8991 spin_unlock_irq(&phba->hbalock); 8992 return mp; 8993 } 8994 } 8995 8996 spin_unlock_irq(&phba->hbalock); 8997 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8998 "0402 Cannot find virtual addr for buffer tag on " 8999 "ring %d Data x%lx x%p x%p x%x\n", 9000 pring->ringno, (unsigned long) tag, 9001 slp->next, slp->prev, pring->postbufq_cnt); 9002 9003 return NULL; 9004 } 9005 9006 /** 9007 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events 9008 * @phba: Pointer to HBA context object. 9009 * @pring: Pointer to driver SLI ring object. 9010 * @phys: DMA address of the buffer. 9011 * 9012 * This function searches the buffer list using the dma_address 9013 * of unsolicited event to find the driver's lpfc_dmabuf object 9014 * corresponding to the dma_address. The function returns the 9015 * lpfc_dmabuf object if a buffer is found else it returns NULL. 9016 * This function is called by the ct and els unsolicited event 9017 * handlers to get the buffer associated with the unsolicited 9018 * event. 9019 * 9020 * This function is called with no lock held. 9021 **/ 9022 struct lpfc_dmabuf * 9023 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 9024 dma_addr_t phys) 9025 { 9026 struct lpfc_dmabuf *mp, *next_mp; 9027 struct list_head *slp = &pring->postbufq; 9028 9029 /* Search postbufq, from the beginning, looking for a match on phys */ 9030 spin_lock_irq(&phba->hbalock); 9031 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 9032 if (mp->phys == phys) { 9033 list_del_init(&mp->list); 9034 pring->postbufq_cnt--; 9035 spin_unlock_irq(&phba->hbalock); 9036 return mp; 9037 } 9038 } 9039 9040 spin_unlock_irq(&phba->hbalock); 9041 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9042 "0410 Cannot find virtual addr for mapped buf on " 9043 "ring %d Data x%llx x%p x%p x%x\n", 9044 pring->ringno, (unsigned long long)phys, 9045 slp->next, slp->prev, pring->postbufq_cnt); 9046 return NULL; 9047 } 9048 9049 /** 9050 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs 9051 * @phba: Pointer to HBA context object. 9052 * @cmdiocb: Pointer to driver command iocb object. 9053 * @rspiocb: Pointer to driver response iocb object. 9054 * 9055 * This function is the completion handler for the abort iocbs for 9056 * ELS commands. This function is called from the ELS ring event 9057 * handler with no lock held. This function frees memory resources 9058 * associated with the abort iocb. 
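 * It is installed as the iocb_cmpl routine by lpfc_sli_abort_iotag_issue()
 * below when the abort request is built.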
9059 **/ 9060 static void 9061 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 9062 struct lpfc_iocbq *rspiocb) 9063 { 9064 IOCB_t *irsp = &rspiocb->iocb; 9065 uint16_t abort_iotag, abort_context; 9066 struct lpfc_iocbq *abort_iocb = NULL; 9067 9068 if (irsp->ulpStatus) { 9069 9070 /* 9071 * Assume that the port already completed and returned, or 9072 * will return the iocb. Just Log the message. 9073 */ 9074 abort_context = cmdiocb->iocb.un.acxri.abortContextTag; 9075 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag; 9076 9077 spin_lock_irq(&phba->hbalock); 9078 if (phba->sli_rev < LPFC_SLI_REV4) { 9079 if (abort_iotag != 0 && 9080 abort_iotag <= phba->sli.last_iotag) 9081 abort_iocb = 9082 phba->sli.iocbq_lookup[abort_iotag]; 9083 } else 9084 /* For sli4 the abort_tag is the XRI, 9085 * so the abort routine puts the iotag of the iocb 9086 * being aborted in the context field of the abort 9087 * IOCB. 9088 */ 9089 abort_iocb = phba->sli.iocbq_lookup[abort_context]; 9090 9091 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI, 9092 "0327 Cannot abort els iocb %p " 9093 "with tag %x context %x, abort status %x, " 9094 "abort code %x\n", 9095 abort_iocb, abort_iotag, abort_context, 9096 irsp->ulpStatus, irsp->un.ulpWord[4]); 9097 9098 spin_unlock_irq(&phba->hbalock); 9099 } 9100 lpfc_sli_release_iocbq(phba, cmdiocb); 9101 return; 9102 } 9103 9104 /** 9105 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command 9106 * @phba: Pointer to HBA context object. 9107 * @cmdiocb: Pointer to driver command iocb object. 9108 * @rspiocb: Pointer to driver response iocb object. 9109 * 9110 * The function is called from SLI ring event handler with no 9111 * lock held. This function is the completion handler for ELS commands 9112 * which are aborted. The function frees memory resources used for 9113 * the aborted ELS commands. 9114 **/ 9115 static void 9116 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 9117 struct lpfc_iocbq *rspiocb) 9118 { 9119 IOCB_t *irsp = &rspiocb->iocb; 9120 9121 /* ELS cmd tag <ulpIoTag> completes */ 9122 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 9123 "0139 Ignoring ELS cmd tag x%x completion Data: " 9124 "x%x x%x x%x\n", 9125 irsp->ulpIoTag, irsp->ulpStatus, 9126 irsp->un.ulpWord[4], irsp->ulpTimeout); 9127 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) 9128 lpfc_ct_free_iocb(phba, cmdiocb); 9129 else 9130 lpfc_els_free_iocb(phba, cmdiocb); 9131 return; 9132 } 9133 9134 /** 9135 * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb 9136 * @phba: Pointer to HBA context object. 9137 * @pring: Pointer to driver SLI ring object. 9138 * @cmdiocb: Pointer to driver command iocb object. 9139 * 9140 * This function issues an abort iocb for the provided command iocb down to 9141 * the port. Other than the case the outstanding command iocb is an abort 9142 * request, this function issues abort out unconditionally. This function is 9143 * called with hbalock held. The function returns 0 when it fails due to 9144 * memory allocation failure or when the command iocb is an abort request. 9145 **/ 9146 static int 9147 lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 9148 struct lpfc_iocbq *cmdiocb) 9149 { 9150 struct lpfc_vport *vport = cmdiocb->vport; 9151 struct lpfc_iocbq *abtsiocbp; 9152 IOCB_t *icmd = NULL; 9153 IOCB_t *iabt = NULL; 9154 int retval; 9155 9156 /* 9157 * There are certain command types we don't want to abort. 
And we
9158      * don't want to abort commands that are already in the process of
9159      * being aborted.
9160      */
9161     icmd = &cmdiocb->iocb;
9162     if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
9163         icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
9164         (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
9165         return 0;
9166 
9167     /* issue ABTS for this IOCB based on iotag */
9168     abtsiocbp = __lpfc_sli_get_iocbq(phba);
9169     if (abtsiocbp == NULL)
9170         return 0;
9171 
9172     /* This signals the response to set the correct status
9173      * before calling the completion handler
9174      */
9175     cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
9176 
9177     iabt = &abtsiocbp->iocb;
9178     iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
9179     iabt->un.acxri.abortContextTag = icmd->ulpContext;
9180     if (phba->sli_rev == LPFC_SLI_REV4) {
9181         iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
9182         iabt->un.acxri.abortContextTag = cmdiocb->iotag;
9183     }
9184     else
9185         iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
9186     iabt->ulpLe = 1;
9187     iabt->ulpClass = icmd->ulpClass;
9188 
9189     /* ABTS WQE must go to the same WQ as the WQE to be aborted */
9190     abtsiocbp->fcp_wqidx = cmdiocb->fcp_wqidx;
9191     if (cmdiocb->iocb_flag & LPFC_IO_FCP)
9192         abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
9193 
9194     if (phba->link_state >= LPFC_LINK_UP)
9195         iabt->ulpCommand = CMD_ABORT_XRI_CN;
9196     else
9197         iabt->ulpCommand = CMD_CLOSE_XRI_CN;
9198 
9199     abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
9200 
9201     lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
9202              "0339 Abort xri x%x, original iotag x%x, "
9203              "abort cmd iotag x%x\n",
9204              iabt->un.acxri.abortIoTag,
9205              iabt->un.acxri.abortContextTag,
9206              abtsiocbp->iotag);
9207     retval = __lpfc_sli_issue_iocb(phba, pring->ringno, abtsiocbp, 0);
9208 
9209     if (retval)
9210         __lpfc_sli_release_iocbq(phba, abtsiocbp);
9211 
9212     /*
9213      * Caller to this routine should check for IOCB_ERROR
9214      * and handle it properly. This routine no longer removes
9215      * iocb off txcmplq and call compl in case of IOCB_ERROR.
9216      */
9217     return retval;
9218 }
9219 
9220 /**
9221  * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
9222  * @phba: Pointer to HBA context object.
9223  * @pring: Pointer to driver SLI ring object.
9224  * @cmdiocb: Pointer to driver command iocb object.
9225  *
9226  * This function issues an abort iocb for the provided command iocb. In case
9227  * of unloading, the abort iocb will not be issued to commands on the ELS
9228  * ring. Instead, the callback function of those commands shall be changed
9229  * so that nothing happens when they finish. This function is called with
9230  * hbalock held. The function returns 0 when the command iocb is an abort
9231  * request.
9232  **/
9233 int
9234 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9235                struct lpfc_iocbq *cmdiocb)
9236 {
9237     struct lpfc_vport *vport = cmdiocb->vport;
9238     int retval = IOCB_ERROR;
9239     IOCB_t *icmd = NULL;
9240 
9241     /*
9242      * There are certain command types we don't want to abort. And we
9243      * don't want to abort commands that are already in the process of
9244      * being aborted.
9245      */
9246     icmd = &cmdiocb->iocb;
9247     if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
9248         icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
9249         (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
9250         return 0;
9251 
9252     /*
9253      * If we're unloading, don't abort iocb on the ELS ring, but change
9254      * the callback so that nothing happens when it finishes.
9255 */ 9256 if ((vport->load_flag & FC_UNLOADING) && 9257 (pring->ringno == LPFC_ELS_RING)) { 9258 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) 9259 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl; 9260 else 9261 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl; 9262 goto abort_iotag_exit; 9263 } 9264 9265 /* Now, we try to issue the abort to the cmdiocb out */ 9266 retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb); 9267 9268 abort_iotag_exit: 9269 /* 9270 * Caller to this routine should check for IOCB_ERROR 9271 * and handle it properly. This routine no longer removes 9272 * iocb off txcmplq and call compl in case of IOCB_ERROR. 9273 */ 9274 return retval; 9275 } 9276 9277 /** 9278 * lpfc_sli_iocb_ring_abort - Unconditionally abort all iocbs on an iocb ring 9279 * @phba: Pointer to HBA context object. 9280 * @pring: Pointer to driver SLI ring object. 9281 * 9282 * This function aborts all iocbs in the given ring and frees all the iocb 9283 * objects in txq. This function issues abort iocbs unconditionally for all 9284 * the iocb commands in txcmplq. The iocbs in the txcmplq is not guaranteed 9285 * to complete before the return of this function. The caller is not required 9286 * to hold any locks. 9287 **/ 9288 static void 9289 lpfc_sli_iocb_ring_abort(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 9290 { 9291 LIST_HEAD(completions); 9292 struct lpfc_iocbq *iocb, *next_iocb; 9293 9294 if (pring->ringno == LPFC_ELS_RING) 9295 lpfc_fabric_abort_hba(phba); 9296 9297 spin_lock_irq(&phba->hbalock); 9298 9299 /* Take off all the iocbs on txq for cancelling */ 9300 list_splice_init(&pring->txq, &completions); 9301 pring->txq_cnt = 0; 9302 9303 /* Next issue ABTS for everything on the txcmplq */ 9304 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) 9305 lpfc_sli_abort_iotag_issue(phba, pring, iocb); 9306 9307 spin_unlock_irq(&phba->hbalock); 9308 9309 /* Cancel all the IOCBs from the completions list */ 9310 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 9311 IOERR_SLI_ABORTED); 9312 } 9313 9314 /** 9315 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba. 9316 * @phba: pointer to lpfc HBA data structure. 9317 * 9318 * This routine will abort all pending and outstanding iocbs to an HBA. 9319 **/ 9320 void 9321 lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba) 9322 { 9323 struct lpfc_sli *psli = &phba->sli; 9324 struct lpfc_sli_ring *pring; 9325 int i; 9326 9327 for (i = 0; i < psli->num_rings; i++) { 9328 pring = &psli->ring[i]; 9329 lpfc_sli_iocb_ring_abort(phba, pring); 9330 } 9331 } 9332 9333 /** 9334 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN 9335 * @iocbq: Pointer to driver iocb object. 9336 * @vport: Pointer to driver virtual port object. 9337 * @tgt_id: SCSI ID of the target. 9338 * @lun_id: LUN ID of the scsi device. 9339 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST 9340 * 9341 * This function acts as an iocb filter for functions which abort or count 9342 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return 9343 * 0 if the filtering criteria is met for the given iocb and will return 9344 * 1 if the filtering criteria is not met. 9345 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the 9346 * given iocb is for the SCSI device specified by vport, tgt_id and 9347 * lun_id parameter. 9348 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the 9349 * given iocb is for the SCSI target specified by vport and tgt_id 9350 * parameters. 
9351 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the 9352 * given iocb is for the SCSI host associated with the given vport. 9353 * This function is called with no locks held. 9354 **/ 9355 static int 9356 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport, 9357 uint16_t tgt_id, uint64_t lun_id, 9358 lpfc_ctx_cmd ctx_cmd) 9359 { 9360 struct lpfc_scsi_buf *lpfc_cmd; 9361 int rc = 1; 9362 9363 if (!(iocbq->iocb_flag & LPFC_IO_FCP)) 9364 return rc; 9365 9366 if (iocbq->vport != vport) 9367 return rc; 9368 9369 lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq); 9370 9371 if (lpfc_cmd->pCmd == NULL) 9372 return rc; 9373 9374 switch (ctx_cmd) { 9375 case LPFC_CTX_LUN: 9376 if ((lpfc_cmd->rdata->pnode) && 9377 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) && 9378 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id)) 9379 rc = 0; 9380 break; 9381 case LPFC_CTX_TGT: 9382 if ((lpfc_cmd->rdata->pnode) && 9383 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id)) 9384 rc = 0; 9385 break; 9386 case LPFC_CTX_HOST: 9387 rc = 0; 9388 break; 9389 default: 9390 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n", 9391 __func__, ctx_cmd); 9392 break; 9393 } 9394 9395 return rc; 9396 } 9397 9398 /** 9399 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending 9400 * @vport: Pointer to virtual port. 9401 * @tgt_id: SCSI ID of the target. 9402 * @lun_id: LUN ID of the scsi device. 9403 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. 9404 * 9405 * This function returns number of FCP commands pending for the vport. 9406 * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP 9407 * commands pending on the vport associated with SCSI device specified 9408 * by tgt_id and lun_id parameters. 9409 * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP 9410 * commands pending on the vport associated with SCSI target specified 9411 * by tgt_id parameter. 9412 * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP 9413 * commands pending on the vport. 9414 * This function returns the number of iocbs which satisfy the filter. 9415 * This function is called without any lock held. 9416 **/ 9417 int 9418 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id, 9419 lpfc_ctx_cmd ctx_cmd) 9420 { 9421 struct lpfc_hba *phba = vport->phba; 9422 struct lpfc_iocbq *iocbq; 9423 int sum, i; 9424 9425 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) { 9426 iocbq = phba->sli.iocbq_lookup[i]; 9427 9428 if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id, 9429 ctx_cmd) == 0) 9430 sum++; 9431 } 9432 9433 return sum; 9434 } 9435 9436 /** 9437 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs 9438 * @phba: Pointer to HBA context object 9439 * @cmdiocb: Pointer to command iocb object. 9440 * @rspiocb: Pointer to response iocb object. 9441 * 9442 * This function is called when an aborted FCP iocb completes. This 9443 * function is called by the ring event handler with no lock held. 9444 * This function frees the iocb. 
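 * It is wired up by lpfc_sli_abort_iocb() below, which sets
 * abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl before issuing the ABTS.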
9445 **/ 9446 void 9447 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 9448 struct lpfc_iocbq *rspiocb) 9449 { 9450 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 9451 "3096 ABORT_XRI_CN completing on xri x%x " 9452 "original iotag x%x, abort cmd iotag x%x " 9453 "status 0x%x, reason 0x%x\n", 9454 cmdiocb->iocb.un.acxri.abortContextTag, 9455 cmdiocb->iocb.un.acxri.abortIoTag, 9456 cmdiocb->iotag, rspiocb->iocb.ulpStatus, 9457 rspiocb->iocb.un.ulpWord[4]); 9458 lpfc_sli_release_iocbq(phba, cmdiocb); 9459 return; 9460 } 9461 9462 /** 9463 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN 9464 * @vport: Pointer to virtual port. 9465 * @pring: Pointer to driver SLI ring object. 9466 * @tgt_id: SCSI ID of the target. 9467 * @lun_id: LUN ID of the scsi device. 9468 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. 9469 * 9470 * This function sends an abort command for every SCSI command 9471 * associated with the given virtual port pending on the ring 9472 * filtered by lpfc_sli_validate_fcp_iocb function. 9473 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the 9474 * FCP iocbs associated with lun specified by tgt_id and lun_id 9475 * parameters 9476 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the 9477 * FCP iocbs associated with SCSI target specified by tgt_id parameter. 9478 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all 9479 * FCP iocbs associated with virtual port. 9480 * This function returns number of iocbs it failed to abort. 9481 * This function is called with no locks held. 9482 **/ 9483 int 9484 lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, 9485 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd) 9486 { 9487 struct lpfc_hba *phba = vport->phba; 9488 struct lpfc_iocbq *iocbq; 9489 struct lpfc_iocbq *abtsiocb; 9490 IOCB_t *cmd = NULL; 9491 int errcnt = 0, ret_val = 0; 9492 int i; 9493 9494 for (i = 1; i <= phba->sli.last_iotag; i++) { 9495 iocbq = phba->sli.iocbq_lookup[i]; 9496 9497 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id, 9498 abort_cmd) != 0) 9499 continue; 9500 9501 /* issue ABTS for this IOCB based on iotag */ 9502 abtsiocb = lpfc_sli_get_iocbq(phba); 9503 if (abtsiocb == NULL) { 9504 errcnt++; 9505 continue; 9506 } 9507 9508 cmd = &iocbq->iocb; 9509 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; 9510 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext; 9511 if (phba->sli_rev == LPFC_SLI_REV4) 9512 abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag; 9513 else 9514 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag; 9515 abtsiocb->iocb.ulpLe = 1; 9516 abtsiocb->iocb.ulpClass = cmd->ulpClass; 9517 abtsiocb->vport = phba->pport; 9518 9519 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 9520 abtsiocb->fcp_wqidx = iocbq->fcp_wqidx; 9521 if (iocbq->iocb_flag & LPFC_IO_FCP) 9522 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX; 9523 9524 if (lpfc_is_link_up(phba)) 9525 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN; 9526 else 9527 abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN; 9528 9529 /* Setup callback routine and issue the command. 
*/ 9530 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 9531 ret_val = lpfc_sli_issue_iocb(phba, pring->ringno, 9532 abtsiocb, 0); 9533 if (ret_val == IOCB_ERROR) { 9534 lpfc_sli_release_iocbq(phba, abtsiocb); 9535 errcnt++; 9536 continue; 9537 } 9538 } 9539 9540 return errcnt; 9541 } 9542 9543 /** 9544 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler 9545 * @phba: Pointer to HBA context object. 9546 * @cmdiocbq: Pointer to command iocb. 9547 * @rspiocbq: Pointer to response iocb. 9548 * 9549 * This function is the completion handler for iocbs issued using 9550 * lpfc_sli_issue_iocb_wait function. This function is called by the 9551 * ring event handler function without any lock held. This function 9552 * can be called from both worker thread context and interrupt 9553 * context. This function also can be called from other thread which 9554 * cleans up the SLI layer objects. 9555 * This function copy the contents of the response iocb to the 9556 * response iocb memory object provided by the caller of 9557 * lpfc_sli_issue_iocb_wait and then wakes up the thread which 9558 * sleeps for the iocb completion. 9559 **/ 9560 static void 9561 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, 9562 struct lpfc_iocbq *cmdiocbq, 9563 struct lpfc_iocbq *rspiocbq) 9564 { 9565 wait_queue_head_t *pdone_q; 9566 unsigned long iflags; 9567 struct lpfc_scsi_buf *lpfc_cmd; 9568 9569 spin_lock_irqsave(&phba->hbalock, iflags); 9570 cmdiocbq->iocb_flag |= LPFC_IO_WAKE; 9571 if (cmdiocbq->context2 && rspiocbq) 9572 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, 9573 &rspiocbq->iocb, sizeof(IOCB_t)); 9574 9575 /* Set the exchange busy flag for task management commands */ 9576 if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) && 9577 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) { 9578 lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf, 9579 cur_iocbq); 9580 lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY; 9581 } 9582 9583 pdone_q = cmdiocbq->context_un.wait_queue; 9584 if (pdone_q) 9585 wake_up(pdone_q); 9586 spin_unlock_irqrestore(&phba->hbalock, iflags); 9587 return; 9588 } 9589 9590 /** 9591 * lpfc_chk_iocb_flg - Test IOCB flag with lock held. 9592 * @phba: Pointer to HBA context object.. 9593 * @piocbq: Pointer to command iocb. 9594 * @flag: Flag to test. 9595 * 9596 * This routine grabs the hbalock and then test the iocb_flag to 9597 * see if the passed in flag is set. 9598 * Returns: 9599 * 1 if flag is set. 9600 * 0 if flag is not set. 9601 **/ 9602 static int 9603 lpfc_chk_iocb_flg(struct lpfc_hba *phba, 9604 struct lpfc_iocbq *piocbq, uint32_t flag) 9605 { 9606 unsigned long iflags; 9607 int ret; 9608 9609 spin_lock_irqsave(&phba->hbalock, iflags); 9610 ret = piocbq->iocb_flag & flag; 9611 spin_unlock_irqrestore(&phba->hbalock, iflags); 9612 return ret; 9613 9614 } 9615 9616 /** 9617 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands 9618 * @phba: Pointer to HBA context object.. 9619 * @pring: Pointer to sli ring. 9620 * @piocb: Pointer to command iocb. 9621 * @prspiocbq: Pointer to response iocb. 9622 * @timeout: Timeout in number of seconds. 9623 * 9624 * This function issues the iocb to firmware and waits for the 9625 * iocb to complete. If the iocb command is not 9626 * completed within timeout seconds, it returns IOCB_TIMEDOUT. 9627 * Caller should not free the iocb resources if this function 9628 * returns IOCB_TIMEDOUT. 9629 * The function waits for the iocb completion using an 9630 * non-interruptible wait. 
9631  * This function will sleep while waiting for iocb completion.
9632  * So, this function should not be called from any context which
9633  * does not allow sleeping. Due to the same reason, this function
9634  * cannot be called with interrupt disabled.
9635  * This function assumes that the iocb completions occur while
9636  * this function sleeps. So, this function cannot be called from
9637  * the thread which processes iocb completion for this ring.
9638  * This function clears the iocb_flag of the iocb object before
9639  * issuing the iocb and the iocb completion handler sets this
9640  * flag and wakes this thread when the iocb completes.
9641  * The contents of the response iocb will be copied to prspiocbq
9642  * by the completion handler when the command completes.
9643  * This function returns IOCB_SUCCESS on success.
9644  * This function is called with no lock held.
9645  **/
9646 int
9647 lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
9648              uint32_t ring_number,
9649              struct lpfc_iocbq *piocb,
9650              struct lpfc_iocbq *prspiocbq,
9651              uint32_t timeout)
9652 {
9653     DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
9654     long timeleft, timeout_req = 0;
9655     int retval = IOCB_SUCCESS;
9656     uint32_t creg_val;
9657     struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
9658     /*
9659      * If the caller has provided a response iocbq buffer, then context2
9660      * must be NULL or it is an error.
9661      */
9662     if (prspiocbq) {
9663         if (piocb->context2)
9664             return IOCB_ERROR;
9665         piocb->context2 = prspiocbq;
9666     }
9667 
9668     piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
9669     piocb->context_un.wait_queue = &done_q;
9670     piocb->iocb_flag &= ~LPFC_IO_WAKE;
9671 
9672     if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
9673         if (lpfc_readl(phba->HCregaddr, &creg_val))
9674             return IOCB_ERROR;
9675         creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
9676         writel(creg_val, phba->HCregaddr);
9677         readl(phba->HCregaddr); /* flush */
9678     }
9679 
9680     retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
9681                      SLI_IOCB_RET_IOCB);
9682     if (retval == IOCB_SUCCESS) {
9683         timeout_req = timeout * HZ;
9684         timeleft = wait_event_timeout(done_q,
9685                 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
9686                 timeout_req);
9687 
9688         if (piocb->iocb_flag & LPFC_IO_WAKE) {
9689             lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9690                     "0331 IOCB wake signaled\n");
9691         } else if (timeleft == 0) {
9692             lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9693                     "0338 IOCB wait timeout error - no "
9694                     "wake response Data x%x\n", timeout);
9695             retval = IOCB_TIMEDOUT;
9696         } else {
9697             lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9698                     "0330 IOCB wake NOT set, "
9699                     "Data x%x x%lx\n",
9700                     timeout, (timeleft / jiffies));
9701             retval = IOCB_TIMEDOUT;
9702         }
9703     } else if (retval == IOCB_BUSY) {
9704         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9705             "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
9706             phba->iocb_cnt, pring->txq_cnt, pring->txcmplq_cnt);
9707         return retval;
9708     } else {
9709         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9710                 "0332 IOCB wait issue failed, Data x%x\n",
9711                 retval);
9712         retval = IOCB_ERROR;
9713     }
9714 
9715     if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
9716         if (lpfc_readl(phba->HCregaddr, &creg_val))
9717             return IOCB_ERROR;
9718         creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
9719         writel(creg_val, phba->HCregaddr);
9720         readl(phba->HCregaddr); /* flush */
9721     }
9722 
9723     if (prspiocbq)
9724         piocb->context2 = NULL;
9725 
9726     piocb->context_un.wait_queue = NULL;
9727     piocb->iocb_cmpl = NULL;
9728     return retval;
9729 }
9730 
9731 /**
9732  * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
9733  * @phba: Pointer to HBA context object.
9734  * @pmboxq: Pointer to driver mailbox object.
9735  * @timeout: Timeout in number of seconds.
9736  *
9737  * This function issues the mailbox to firmware and waits for the
9738  * mailbox command to complete. If the mailbox command is not
9739  * completed within timeout seconds, it returns MBX_TIMEOUT.
9740  * The function waits for the mailbox completion using an
9741  * interruptible wait. If the thread is woken up due to a
9742  * signal, MBX_TIMEOUT error is returned to the caller. The caller
9743  * should not free the mailbox resources if this function returns
9744  * MBX_TIMEOUT.
9745  * This function will sleep while waiting for mailbox completion.
9746  * So, this function should not be called from any context which
9747  * does not allow sleeping. Due to the same reason, this function
9748  * cannot be called with interrupt disabled.
9749  * This function assumes that the mailbox completion occurs while
9750  * this function sleeps. So, this function cannot be called from
9751  * the worker thread which processes mailbox completion.
9752  * This function is called in the context of HBA management
9753  * applications.
9754  * This function returns MBX_SUCCESS when successful.
9755  * This function is called with no lock held.
9756  **/
9757 int
9758 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
9759              uint32_t timeout)
9760 {
9761     DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
9762     int retval;
9763     unsigned long flag;
9764 
9765     /* The caller must leave context1 empty. */
9766     if (pmboxq->context1)
9767         return MBX_NOT_FINISHED;
9768 
9769     pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
9770     /* setup wake call as IOCB callback */
9771     pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
9772     /* setup context field to pass wait_queue pointer to wake function */
9773     pmboxq->context1 = &done_q;
9774 
9775     /* now issue the command */
9776     retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
9777     if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
9778         wait_event_interruptible_timeout(done_q,
9779                 pmboxq->mbox_flag & LPFC_MBX_WAKE,
9780                 timeout * HZ);
9781 
9782         spin_lock_irqsave(&phba->hbalock, flag);
9783         pmboxq->context1 = NULL;
9784         /*
9785          * if LPFC_MBX_WAKE flag is set the mailbox is completed
9786          * else do not free the resources.
9787          */
9788         if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
9789             retval = MBX_SUCCESS;
9790             lpfc_sli4_swap_str(phba, pmboxq);
9791         } else {
9792             retval = MBX_TIMEOUT;
9793             pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
9794         }
9795         spin_unlock_irqrestore(&phba->hbalock, flag);
9796     }
9797 
9798     return retval;
9799 }
9800 
9801 /**
9802  * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
9803  * @phba: Pointer to HBA context.
9804  *
9805  * This function is called to shutdown the driver's mailbox sub-system.
9806  * It first marks the mailbox sub-system as blocked to prevent
9807  * asynchronous mailbox commands from being issued off the pending mailbox
9808  * command queue. If the mailbox command sub-system shutdown is due to
9809  * HBA error conditions such as EEH or ERATT, this routine shall invoke
9810  * the mailbox sub-system flush routine to forcefully bring down the
9811  * mailbox sub-system. Otherwise, if it is due to a normal condition (such
9812  * as with offline or HBA function reset), this routine will wait for the
9813  * outstanding mailbox command to complete before invoking the mailbox
9814  * sub-system flush routine to gracefully bring down the mailbox sub-system.
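 * lpfc_sli_hba_down() above, for example, invokes this routine before any
 * ring resources are torn down so that no mailbox command is left pending.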
9815 **/ 9816 void 9817 lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba) 9818 { 9819 struct lpfc_sli *psli = &phba->sli; 9820 unsigned long timeout; 9821 9822 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies; 9823 9824 spin_lock_irq(&phba->hbalock); 9825 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 9826 9827 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 9828 /* Determine how long we might wait for the active mailbox 9829 * command to be gracefully completed by firmware. 9830 */ 9831 if (phba->sli.mbox_active) 9832 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, 9833 phba->sli.mbox_active) * 9834 1000) + jiffies; 9835 spin_unlock_irq(&phba->hbalock); 9836 9837 while (phba->sli.mbox_active) { 9838 /* Check active mailbox complete status every 2ms */ 9839 msleep(2); 9840 if (time_after(jiffies, timeout)) 9841 /* Timeout, let the mailbox flush routine to 9842 * forcefully release active mailbox command 9843 */ 9844 break; 9845 } 9846 } else 9847 spin_unlock_irq(&phba->hbalock); 9848 9849 lpfc_sli_mbox_sys_flush(phba); 9850 } 9851 9852 /** 9853 * lpfc_sli_eratt_read - read sli-3 error attention events 9854 * @phba: Pointer to HBA context. 9855 * 9856 * This function is called to read the SLI3 device error attention registers 9857 * for possible error attention events. The caller must hold the hostlock 9858 * with spin_lock_irq(). 9859 * 9860 * This function returns 1 when there is Error Attention in the Host Attention 9861 * Register and returns 0 otherwise. 9862 **/ 9863 static int 9864 lpfc_sli_eratt_read(struct lpfc_hba *phba) 9865 { 9866 uint32_t ha_copy; 9867 9868 /* Read chip Host Attention (HA) register */ 9869 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 9870 goto unplug_err; 9871 9872 if (ha_copy & HA_ERATT) { 9873 /* Read host status register to retrieve error event */ 9874 if (lpfc_sli_read_hs(phba)) 9875 goto unplug_err; 9876 9877 /* Check if there is a deferred error condition is active */ 9878 if ((HS_FFER1 & phba->work_hs) && 9879 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | 9880 HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) { 9881 phba->hba_flag |= DEFER_ERATT; 9882 /* Clear all interrupt enable conditions */ 9883 writel(0, phba->HCregaddr); 9884 readl(phba->HCregaddr); 9885 } 9886 9887 /* Set the driver HA work bitmap */ 9888 phba->work_ha |= HA_ERATT; 9889 /* Indicate polling handles this ERATT */ 9890 phba->hba_flag |= HBA_ERATT_HANDLED; 9891 return 1; 9892 } 9893 return 0; 9894 9895 unplug_err: 9896 /* Set the driver HS work bitmap */ 9897 phba->work_hs |= UNPLUG_ERR; 9898 /* Set the driver HA work bitmap */ 9899 phba->work_ha |= HA_ERATT; 9900 /* Indicate polling handles this ERATT */ 9901 phba->hba_flag |= HBA_ERATT_HANDLED; 9902 return 1; 9903 } 9904 9905 /** 9906 * lpfc_sli4_eratt_read - read sli-4 error attention events 9907 * @phba: Pointer to HBA context. 9908 * 9909 * This function is called to read the SLI4 device error attention registers 9910 * for possible error attention events. The caller must hold the hostlock 9911 * with spin_lock_irq(). 9912 * 9913 * This function returns 1 when there is Error Attention in the Host Attention 9914 * Register and returns 0 otherwise. 9915 **/ 9916 static int 9917 lpfc_sli4_eratt_read(struct lpfc_hba *phba) 9918 { 9919 uint32_t uerr_sta_hi, uerr_sta_lo; 9920 uint32_t if_type, portsmphr; 9921 struct lpfc_register portstat_reg; 9922 9923 /* 9924 * For now, use the SLI4 device internal unrecoverable error 9925 * registers for error attention. This can be changed later. 
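     * Which registers are sampled depends on the interface type read from
     * sli_intf below: if_type 0 uses the UERR low/high pair while if_type 2
     * uses the port STATUS, ERR1 and ERR2 registers.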
9926 */ 9927 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 9928 switch (if_type) { 9929 case LPFC_SLI_INTF_IF_TYPE_0: 9930 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr, 9931 &uerr_sta_lo) || 9932 lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr, 9933 &uerr_sta_hi)) { 9934 phba->work_hs |= UNPLUG_ERR; 9935 phba->work_ha |= HA_ERATT; 9936 phba->hba_flag |= HBA_ERATT_HANDLED; 9937 return 1; 9938 } 9939 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) || 9940 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) { 9941 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9942 "1423 HBA Unrecoverable error: " 9943 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " 9944 "ue_mask_lo_reg=0x%x, " 9945 "ue_mask_hi_reg=0x%x\n", 9946 uerr_sta_lo, uerr_sta_hi, 9947 phba->sli4_hba.ue_mask_lo, 9948 phba->sli4_hba.ue_mask_hi); 9949 phba->work_status[0] = uerr_sta_lo; 9950 phba->work_status[1] = uerr_sta_hi; 9951 phba->work_ha |= HA_ERATT; 9952 phba->hba_flag |= HBA_ERATT_HANDLED; 9953 return 1; 9954 } 9955 break; 9956 case LPFC_SLI_INTF_IF_TYPE_2: 9957 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, 9958 &portstat_reg.word0) || 9959 lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 9960 &portsmphr)){ 9961 phba->work_hs |= UNPLUG_ERR; 9962 phba->work_ha |= HA_ERATT; 9963 phba->hba_flag |= HBA_ERATT_HANDLED; 9964 return 1; 9965 } 9966 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) { 9967 phba->work_status[0] = 9968 readl(phba->sli4_hba.u.if_type2.ERR1regaddr); 9969 phba->work_status[1] = 9970 readl(phba->sli4_hba.u.if_type2.ERR2regaddr); 9971 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9972 "2885 Port Status Event: " 9973 "port status reg 0x%x, " 9974 "port smphr reg 0x%x, " 9975 "error 1=0x%x, error 2=0x%x\n", 9976 portstat_reg.word0, 9977 portsmphr, 9978 phba->work_status[0], 9979 phba->work_status[1]); 9980 phba->work_ha |= HA_ERATT; 9981 phba->hba_flag |= HBA_ERATT_HANDLED; 9982 return 1; 9983 } 9984 break; 9985 case LPFC_SLI_INTF_IF_TYPE_1: 9986 default: 9987 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9988 "2886 HBA Error Attention on unsupported " 9989 "if type %d.", if_type); 9990 return 1; 9991 } 9992 9993 return 0; 9994 } 9995 9996 /** 9997 * lpfc_sli_check_eratt - check error attention events 9998 * @phba: Pointer to HBA context. 9999 * 10000 * This function is called from timer soft interrupt context to check HBA's 10001 * error attention register bit for error attention events. 10002 * 10003 * This function returns 1 when there is Error Attention in the Host Attention 10004 * Register and returns 0 otherwise. 10005 **/ 10006 int 10007 lpfc_sli_check_eratt(struct lpfc_hba *phba) 10008 { 10009 uint32_t ha_copy; 10010 10011 /* If somebody is waiting to handle an eratt, don't process it 10012 * here. The brdkill function will do this. 
10013 */ 10014 if (phba->link_flag & LS_IGNORE_ERATT) 10015 return 0; 10016 10017 /* Check if interrupt handler handles this ERATT */ 10018 spin_lock_irq(&phba->hbalock); 10019 if (phba->hba_flag & HBA_ERATT_HANDLED) { 10020 /* Interrupt handler has handled ERATT */ 10021 spin_unlock_irq(&phba->hbalock); 10022 return 0; 10023 } 10024 10025 /* 10026 * If there is deferred error attention, do not check for error 10027 * attention 10028 */ 10029 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 10030 spin_unlock_irq(&phba->hbalock); 10031 return 0; 10032 } 10033 10034 /* If PCI channel is offline, don't process it */ 10035 if (unlikely(pci_channel_offline(phba->pcidev))) { 10036 spin_unlock_irq(&phba->hbalock); 10037 return 0; 10038 } 10039 10040 switch (phba->sli_rev) { 10041 case LPFC_SLI_REV2: 10042 case LPFC_SLI_REV3: 10043 /* Read chip Host Attention (HA) register */ 10044 ha_copy = lpfc_sli_eratt_read(phba); 10045 break; 10046 case LPFC_SLI_REV4: 10047 /* Read device Uncoverable Error (UERR) registers */ 10048 ha_copy = lpfc_sli4_eratt_read(phba); 10049 break; 10050 default: 10051 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10052 "0299 Invalid SLI revision (%d)\n", 10053 phba->sli_rev); 10054 ha_copy = 0; 10055 break; 10056 } 10057 spin_unlock_irq(&phba->hbalock); 10058 10059 return ha_copy; 10060 } 10061 10062 /** 10063 * lpfc_intr_state_check - Check device state for interrupt handling 10064 * @phba: Pointer to HBA context. 10065 * 10066 * This inline routine checks whether a device or its PCI slot is in a state 10067 * that the interrupt should be handled. 10068 * 10069 * This function returns 0 if the device or the PCI slot is in a state that 10070 * interrupt should be handled, otherwise -EIO. 10071 */ 10072 static inline int 10073 lpfc_intr_state_check(struct lpfc_hba *phba) 10074 { 10075 /* If the pci channel is offline, ignore all the interrupts */ 10076 if (unlikely(pci_channel_offline(phba->pcidev))) 10077 return -EIO; 10078 10079 /* Update device level interrupt statistics */ 10080 phba->sli.slistat.sli_intr++; 10081 10082 /* Ignore all interrupts during initialization. */ 10083 if (unlikely(phba->link_state < LPFC_LINK_DOWN)) 10084 return -EIO; 10085 10086 return 0; 10087 } 10088 10089 /** 10090 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device 10091 * @irq: Interrupt number. 10092 * @dev_id: The device context pointer. 10093 * 10094 * This function is directly called from the PCI layer as an interrupt 10095 * service routine when device with SLI-3 interface spec is enabled with 10096 * MSI-X multi-message interrupt mode and there are slow-path events in 10097 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ 10098 * interrupt mode, this function is called as part of the device-level 10099 * interrupt handler. When the PCI slot is in error recovery or the HBA 10100 * is undergoing initialization, the interrupt handler will not process 10101 * the interrupt. The link attention and ELS ring attention events are 10102 * handled by the worker thread. The interrupt handler signals the worker 10103 * thread and returns for these events. This function is called without 10104 * any lock held. It gets the hbalock to access and update SLI data 10105 * structures. 10106 * 10107 * This function returns IRQ_HANDLED when interrupt is handled else it 10108 * returns IRQ_NONE. 
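 *
 * As a sketch only (the actual setup lives elsewhere in the driver and the
 * vector variable and name string here are illustrative), such a handler is
 * registered in MSI-X mode along the lines of:
 *
 *     rc = request_irq(sp_msix_vector, lpfc_sli_sp_intr_handler, 0,
 *                      "lpfc-sp", phba);
 *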
10109 **/ 10110 irqreturn_t 10111 lpfc_sli_sp_intr_handler(int irq, void *dev_id) 10112 { 10113 struct lpfc_hba *phba; 10114 uint32_t ha_copy, hc_copy; 10115 uint32_t work_ha_copy; 10116 unsigned long status; 10117 unsigned long iflag; 10118 uint32_t control; 10119 10120 MAILBOX_t *mbox, *pmbox; 10121 struct lpfc_vport *vport; 10122 struct lpfc_nodelist *ndlp; 10123 struct lpfc_dmabuf *mp; 10124 LPFC_MBOXQ_t *pmb; 10125 int rc; 10126 10127 /* 10128 * Get the driver's phba structure from the dev_id and 10129 * assume the HBA is not interrupting. 10130 */ 10131 phba = (struct lpfc_hba *)dev_id; 10132 10133 if (unlikely(!phba)) 10134 return IRQ_NONE; 10135 10136 /* 10137 * Stuff needs to be attented to when this function is invoked as an 10138 * individual interrupt handler in MSI-X multi-message interrupt mode 10139 */ 10140 if (phba->intr_type == MSIX) { 10141 /* Check device state for handling interrupt */ 10142 if (lpfc_intr_state_check(phba)) 10143 return IRQ_NONE; 10144 /* Need to read HA REG for slow-path events */ 10145 spin_lock_irqsave(&phba->hbalock, iflag); 10146 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 10147 goto unplug_error; 10148 /* If somebody is waiting to handle an eratt don't process it 10149 * here. The brdkill function will do this. 10150 */ 10151 if (phba->link_flag & LS_IGNORE_ERATT) 10152 ha_copy &= ~HA_ERATT; 10153 /* Check the need for handling ERATT in interrupt handler */ 10154 if (ha_copy & HA_ERATT) { 10155 if (phba->hba_flag & HBA_ERATT_HANDLED) 10156 /* ERATT polling has handled ERATT */ 10157 ha_copy &= ~HA_ERATT; 10158 else 10159 /* Indicate interrupt handler handles ERATT */ 10160 phba->hba_flag |= HBA_ERATT_HANDLED; 10161 } 10162 10163 /* 10164 * If there is deferred error attention, do not check for any 10165 * interrupt. 10166 */ 10167 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 10168 spin_unlock_irqrestore(&phba->hbalock, iflag); 10169 return IRQ_NONE; 10170 } 10171 10172 /* Clear up only attention source related to slow-path */ 10173 if (lpfc_readl(phba->HCregaddr, &hc_copy)) 10174 goto unplug_error; 10175 10176 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA | 10177 HC_LAINT_ENA | HC_ERINT_ENA), 10178 phba->HCregaddr); 10179 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)), 10180 phba->HAregaddr); 10181 writel(hc_copy, phba->HCregaddr); 10182 readl(phba->HAregaddr); /* flush */ 10183 spin_unlock_irqrestore(&phba->hbalock, iflag); 10184 } else 10185 ha_copy = phba->ha_copy; 10186 10187 work_ha_copy = ha_copy & phba->work_ha_mask; 10188 10189 if (work_ha_copy) { 10190 if (work_ha_copy & HA_LATT) { 10191 if (phba->sli.sli_flag & LPFC_PROCESS_LA) { 10192 /* 10193 * Turn off Link Attention interrupts 10194 * until CLEAR_LA done 10195 */ 10196 spin_lock_irqsave(&phba->hbalock, iflag); 10197 phba->sli.sli_flag &= ~LPFC_PROCESS_LA; 10198 if (lpfc_readl(phba->HCregaddr, &control)) 10199 goto unplug_error; 10200 control &= ~HC_LAINT_ENA; 10201 writel(control, phba->HCregaddr); 10202 readl(phba->HCregaddr); /* flush */ 10203 spin_unlock_irqrestore(&phba->hbalock, iflag); 10204 } 10205 else 10206 work_ha_copy &= ~HA_LATT; 10207 } 10208 10209 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) { 10210 /* 10211 * Turn off Slow Rings interrupts, LPFC_ELS_RING is 10212 * the only slow ring. 
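         * Each ring owns a 4-bit nibble of attention bits in the HA
         * register, so masking with (HA_RXMASK << (4 * ring)) and
         * shifting back down isolates the ELS ring's receive bits here.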
10213 */ 10214 status = (work_ha_copy & 10215 (HA_RXMASK << (4*LPFC_ELS_RING))); 10216 status >>= (4*LPFC_ELS_RING); 10217 if (status & HA_RXMASK) { 10218 spin_lock_irqsave(&phba->hbalock, iflag); 10219 if (lpfc_readl(phba->HCregaddr, &control)) 10220 goto unplug_error; 10221 10222 lpfc_debugfs_slow_ring_trc(phba, 10223 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x", 10224 control, status, 10225 (uint32_t)phba->sli.slistat.sli_intr); 10226 10227 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) { 10228 lpfc_debugfs_slow_ring_trc(phba, 10229 "ISR Disable ring:" 10230 "pwork:x%x hawork:x%x wait:x%x", 10231 phba->work_ha, work_ha_copy, 10232 (uint32_t)((unsigned long) 10233 &phba->work_waitq)); 10234 10235 control &= 10236 ~(HC_R0INT_ENA << LPFC_ELS_RING); 10237 writel(control, phba->HCregaddr); 10238 readl(phba->HCregaddr); /* flush */ 10239 } 10240 else { 10241 lpfc_debugfs_slow_ring_trc(phba, 10242 "ISR slow ring: pwork:" 10243 "x%x hawork:x%x wait:x%x", 10244 phba->work_ha, work_ha_copy, 10245 (uint32_t)((unsigned long) 10246 &phba->work_waitq)); 10247 } 10248 spin_unlock_irqrestore(&phba->hbalock, iflag); 10249 } 10250 } 10251 spin_lock_irqsave(&phba->hbalock, iflag); 10252 if (work_ha_copy & HA_ERATT) { 10253 if (lpfc_sli_read_hs(phba)) 10254 goto unplug_error; 10255 /* 10256 * Check if there is a deferred error condition 10257 * is active 10258 */ 10259 if ((HS_FFER1 & phba->work_hs) && 10260 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | 10261 HS_FFER6 | HS_FFER7 | HS_FFER8) & 10262 phba->work_hs)) { 10263 phba->hba_flag |= DEFER_ERATT; 10264 /* Clear all interrupt enable conditions */ 10265 writel(0, phba->HCregaddr); 10266 readl(phba->HCregaddr); 10267 } 10268 } 10269 10270 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) { 10271 pmb = phba->sli.mbox_active; 10272 pmbox = &pmb->u.mb; 10273 mbox = phba->mbox; 10274 vport = pmb->vport; 10275 10276 /* First check out the status word */ 10277 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t)); 10278 if (pmbox->mbxOwner != OWN_HOST) { 10279 spin_unlock_irqrestore(&phba->hbalock, iflag); 10280 /* 10281 * Stray Mailbox Interrupt, mbxCommand <cmd> 10282 * mbxStatus <status> 10283 */ 10284 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 10285 LOG_SLI, 10286 "(%d):0304 Stray Mailbox " 10287 "Interrupt mbxCommand x%x " 10288 "mbxStatus x%x\n", 10289 (vport ? vport->vpi : 0), 10290 pmbox->mbxCommand, 10291 pmbox->mbxStatus); 10292 /* clear mailbox attention bit */ 10293 work_ha_copy &= ~HA_MBATT; 10294 } else { 10295 phba->sli.mbox_active = NULL; 10296 spin_unlock_irqrestore(&phba->hbalock, iflag); 10297 phba->last_completion_time = jiffies; 10298 del_timer(&phba->sli.mbox_tmo); 10299 if (pmb->mbox_cmpl) { 10300 lpfc_sli_pcimem_bcopy(mbox, pmbox, 10301 MAILBOX_CMD_SIZE); 10302 if (pmb->out_ext_byte_len && 10303 pmb->context2) 10304 lpfc_sli_pcimem_bcopy( 10305 phba->mbox_ext, 10306 pmb->context2, 10307 pmb->out_ext_byte_len); 10308 } 10309 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) { 10310 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG; 10311 10312 lpfc_debugfs_disc_trc(vport, 10313 LPFC_DISC_TRC_MBOX_VPORT, 10314 "MBOX dflt rpi: : " 10315 "status:x%x rpi:x%x", 10316 (uint32_t)pmbox->mbxStatus, 10317 pmbox->un.varWords[0], 0); 10318 10319 if (!pmbox->mbxStatus) { 10320 mp = (struct lpfc_dmabuf *) 10321 (pmb->context1); 10322 ndlp = (struct lpfc_nodelist *) 10323 pmb->context2; 10324 10325 /* Reg_LOGIN of dflt RPI was 10326 * successful. new lets get 10327 * rid of the RPI using the 10328 * same mbox buffer. 
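                     * The UNREG_LOGIN below reuses this pmb with its
                     * completion handler switched to lpfc_mbx_cmpl_dflt_rpi.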
10329                      */
10330                     lpfc_unreg_login(phba,
10331                         vport->vpi,
10332                         pmbox->un.varWords[0],
10333                         pmb);
10334                     pmb->mbox_cmpl =
10335                         lpfc_mbx_cmpl_dflt_rpi;
10336                     pmb->context1 = mp;
10337                     pmb->context2 = ndlp;
10338                     pmb->vport = vport;
10339                     rc = lpfc_sli_issue_mbox(phba,
10340                             pmb,
10341                             MBX_NOWAIT);
10342                     if (rc != MBX_BUSY)
10343                         lpfc_printf_log(phba,
10344                         KERN_ERR,
10345                         LOG_MBOX | LOG_SLI,
10346                         "0350 rc should have "
10347                         "been MBX_BUSY\n");
10348                     if (rc != MBX_NOT_FINISHED)
10349                         goto send_current_mbox;
10350                 }
10351             }
10352             spin_lock_irqsave(
10353                     &phba->pport->work_port_lock,
10354                     iflag);
10355             phba->pport->work_port_events &=
10356                 ~WORKER_MBOX_TMO;
10357             spin_unlock_irqrestore(
10358                     &phba->pport->work_port_lock,
10359                     iflag);
10360             lpfc_mbox_cmpl_put(phba, pmb);
10361         }
10362     } else
10363         spin_unlock_irqrestore(&phba->hbalock, iflag);
10364 
10365     if ((work_ha_copy & HA_MBATT) &&
10366         (phba->sli.mbox_active == NULL)) {
10367 send_current_mbox:
10368         /* Process next mailbox command if there is one */
10369         do {
10370             rc = lpfc_sli_issue_mbox(phba, NULL,
10371                          MBX_NOWAIT);
10372         } while (rc == MBX_NOT_FINISHED);
10373         if (rc != MBX_SUCCESS)
10374             lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
10375                     LOG_SLI, "0349 rc should be "
10376                     "MBX_SUCCESS\n");
10377     }
10378 
10379     spin_lock_irqsave(&phba->hbalock, iflag);
10380     phba->work_ha |= work_ha_copy;
10381     spin_unlock_irqrestore(&phba->hbalock, iflag);
10382     lpfc_worker_wake_up(phba);
10383     }
10384     return IRQ_HANDLED;
10385 unplug_error:
10386     spin_unlock_irqrestore(&phba->hbalock, iflag);
10387     return IRQ_HANDLED;
10388 
10389 } /* lpfc_sli_sp_intr_handler */
10390 
10391 /**
10392  * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
10393  * @irq: Interrupt number.
10394  * @dev_id: The device context pointer.
10395  *
10396  * This function is directly called from the PCI layer as an interrupt
10397  * service routine when device with SLI-3 interface spec is enabled with
10398  * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
10399  * ring event in the HBA. However, when the device is enabled with either
10400  * MSI or Pin-IRQ interrupt mode, this function is called as part of the
10401  * device-level interrupt handler. When the PCI slot is in error recovery
10402  * or the HBA is undergoing initialization, the interrupt handler will not
10403  * process the interrupt. The SCSI FCP fast-path ring events are handled in
10404  * the interrupt context. This function is called without any lock held.
10405  * It gets the hbalock to access and update SLI data structures.
10406  *
10407  * This function returns IRQ_HANDLED when interrupt is handled else it
10408  * returns IRQ_NONE.
10409  **/
10410 irqreturn_t
10411 lpfc_sli_fp_intr_handler(int irq, void *dev_id)
10412 {
10413     struct lpfc_hba *phba;
10414     uint32_t ha_copy;
10415     unsigned long status;
10416     unsigned long iflag;
10417 
10418     /* Get the driver's phba structure from the dev_id and
10419      * assume the HBA is not interrupting.
10420 */ 10421 phba = (struct lpfc_hba *) dev_id; 10422 10423 if (unlikely(!phba)) 10424 return IRQ_NONE; 10425 10426 /* 10427 * Stuff needs to be attented to when this function is invoked as an 10428 * individual interrupt handler in MSI-X multi-message interrupt mode 10429 */ 10430 if (phba->intr_type == MSIX) { 10431 /* Check device state for handling interrupt */ 10432 if (lpfc_intr_state_check(phba)) 10433 return IRQ_NONE; 10434 /* Need to read HA REG for FCP ring and other ring events */ 10435 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 10436 return IRQ_HANDLED; 10437 /* Clear up only attention source related to fast-path */ 10438 spin_lock_irqsave(&phba->hbalock, iflag); 10439 /* 10440 * If there is deferred error attention, do not check for 10441 * any interrupt. 10442 */ 10443 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 10444 spin_unlock_irqrestore(&phba->hbalock, iflag); 10445 return IRQ_NONE; 10446 } 10447 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)), 10448 phba->HAregaddr); 10449 readl(phba->HAregaddr); /* flush */ 10450 spin_unlock_irqrestore(&phba->hbalock, iflag); 10451 } else 10452 ha_copy = phba->ha_copy; 10453 10454 /* 10455 * Process all events on FCP ring. Take the optimized path for FCP IO. 10456 */ 10457 ha_copy &= ~(phba->work_ha_mask); 10458 10459 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING))); 10460 status >>= (4*LPFC_FCP_RING); 10461 if (status & HA_RXMASK) 10462 lpfc_sli_handle_fast_ring_event(phba, 10463 &phba->sli.ring[LPFC_FCP_RING], 10464 status); 10465 10466 if (phba->cfg_multi_ring_support == 2) { 10467 /* 10468 * Process all events on extra ring. Take the optimized path 10469 * for extra ring IO. 10470 */ 10471 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING))); 10472 status >>= (4*LPFC_EXTRA_RING); 10473 if (status & HA_RXMASK) { 10474 lpfc_sli_handle_fast_ring_event(phba, 10475 &phba->sli.ring[LPFC_EXTRA_RING], 10476 status); 10477 } 10478 } 10479 return IRQ_HANDLED; 10480 } /* lpfc_sli_fp_intr_handler */ 10481 10482 /** 10483 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device 10484 * @irq: Interrupt number. 10485 * @dev_id: The device context pointer. 10486 * 10487 * This function is the HBA device-level interrupt handler to device with 10488 * SLI-3 interface spec, called from the PCI layer when either MSI or 10489 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which 10490 * requires driver attention. This function invokes the slow-path interrupt 10491 * attention handling function and fast-path interrupt attention handling 10492 * function in turn to process the relevant HBA attention events. This 10493 * function is called without any lock held. It gets the hbalock to access 10494 * and update SLI data structures. 10495 * 10496 * This function returns IRQ_HANDLED when interrupt is handled, else it 10497 * returns IRQ_NONE. 10498 **/ 10499 irqreturn_t 10500 lpfc_sli_intr_handler(int irq, void *dev_id) 10501 { 10502 struct lpfc_hba *phba; 10503 irqreturn_t sp_irq_rc, fp_irq_rc; 10504 unsigned long status1, status2; 10505 uint32_t hc_copy; 10506 10507 /* 10508 * Get the driver's phba structure from the dev_id and 10509 * assume the HBA is not interrupting. 
10510 */ 10511 phba = (struct lpfc_hba *) dev_id; 10512 10513 if (unlikely(!phba)) 10514 return IRQ_NONE; 10515 10516 /* Check device state for handling interrupt */ 10517 if (lpfc_intr_state_check(phba)) 10518 return IRQ_NONE; 10519 10520 spin_lock(&phba->hbalock); 10521 if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) { 10522 spin_unlock(&phba->hbalock); 10523 return IRQ_HANDLED; 10524 } 10525 10526 if (unlikely(!phba->ha_copy)) { 10527 spin_unlock(&phba->hbalock); 10528 return IRQ_NONE; 10529 } else if (phba->ha_copy & HA_ERATT) { 10530 if (phba->hba_flag & HBA_ERATT_HANDLED) 10531 /* ERATT polling has handled ERATT */ 10532 phba->ha_copy &= ~HA_ERATT; 10533 else 10534 /* Indicate interrupt handler handles ERATT */ 10535 phba->hba_flag |= HBA_ERATT_HANDLED; 10536 } 10537 10538 /* 10539 * If there is deferred error attention, do not check for any interrupt. 10540 */ 10541 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 10542 spin_unlock(&phba->hbalock); 10543 return IRQ_NONE; 10544 } 10545 10546 /* Clear attention sources except link and error attentions */ 10547 if (lpfc_readl(phba->HCregaddr, &hc_copy)) { 10548 spin_unlock(&phba->hbalock); 10549 return IRQ_HANDLED; 10550 } 10551 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA 10552 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA), 10553 phba->HCregaddr); 10554 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr); 10555 writel(hc_copy, phba->HCregaddr); 10556 readl(phba->HAregaddr); /* flush */ 10557 spin_unlock(&phba->hbalock); 10558 10559 /* 10560 * Invokes slow-path host attention interrupt handling as appropriate. 10561 */ 10562 10563 /* status of events with mailbox and link attention */ 10564 status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT); 10565 10566 /* status of events with ELS ring */ 10567 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING))); 10568 status2 >>= (4*LPFC_ELS_RING); 10569 10570 if (status1 || (status2 & HA_RXMASK)) 10571 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id); 10572 else 10573 sp_irq_rc = IRQ_NONE; 10574 10575 /* 10576 * Invoke fast-path host attention interrupt handling as appropriate. 10577 */ 10578 10579 /* status of events with FCP ring */ 10580 status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING))); 10581 status1 >>= (4*LPFC_FCP_RING); 10582 10583 /* status of events with extra ring */ 10584 if (phba->cfg_multi_ring_support == 2) { 10585 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING))); 10586 status2 >>= (4*LPFC_EXTRA_RING); 10587 } else 10588 status2 = 0; 10589 10590 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK)) 10591 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id); 10592 else 10593 fp_irq_rc = IRQ_NONE; 10594 10595 /* Return device-level interrupt handling status */ 10596 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc; 10597 } /* lpfc_sli_intr_handler */ 10598 10599 /** 10600 * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event 10601 * @phba: pointer to lpfc hba data structure. 10602 * 10603 * This routine is invoked by the worker thread to process all the pending 10604 * SLI4 FCP abort XRI events. 
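 *
 * The events drained here are produced by lpfc_sli4_sp_handle_abort_xri_wcqe(),
 * which copies the XRI-aborted CQE onto sp_fcp_xri_aborted_work_queue and sets
 * the FCP_XRI_ABORT_EVENT flag so that the worker thread is subsequently woken
 * to run this routine.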
10605 **/ 10606 void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba) 10607 { 10608 struct lpfc_cq_event *cq_event; 10609 10610 /* First, declare the fcp xri abort event has been handled */ 10611 spin_lock_irq(&phba->hbalock); 10612 phba->hba_flag &= ~FCP_XRI_ABORT_EVENT; 10613 spin_unlock_irq(&phba->hbalock); 10614 /* Now, handle all the fcp xri abort events */ 10615 while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) { 10616 /* Get the first event from the head of the event queue */ 10617 spin_lock_irq(&phba->hbalock); 10618 list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue, 10619 cq_event, struct lpfc_cq_event, list); 10620 spin_unlock_irq(&phba->hbalock); 10621 /* Notify aborted XRI for FCP work queue */ 10622 lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri); 10623 /* Free the event processed back to the free pool */ 10624 lpfc_sli4_cq_event_release(phba, cq_event); 10625 } 10626 } 10627 10628 /** 10629 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event 10630 * @phba: pointer to lpfc hba data structure. 10631 * 10632 * This routine is invoked by the worker thread to process all the pending 10633 * SLI4 els abort xri events. 10634 **/ 10635 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba) 10636 { 10637 struct lpfc_cq_event *cq_event; 10638 10639 /* First, declare the els xri abort event has been handled */ 10640 spin_lock_irq(&phba->hbalock); 10641 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT; 10642 spin_unlock_irq(&phba->hbalock); 10643 /* Now, handle all the els xri abort events */ 10644 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) { 10645 /* Get the first event from the head of the event queue */ 10646 spin_lock_irq(&phba->hbalock); 10647 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue, 10648 cq_event, struct lpfc_cq_event, list); 10649 spin_unlock_irq(&phba->hbalock); 10650 /* Notify aborted XRI for ELS work queue */ 10651 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri); 10652 /* Free the event processed back to the free pool */ 10653 lpfc_sli4_cq_event_release(phba, cq_event); 10654 } 10655 } 10656 10657 /** 10658 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn 10659 * @phba: pointer to lpfc hba data structure 10660 * @pIocbIn: pointer to the rspiocbq 10661 * @pIocbOut: pointer to the cmdiocbq 10662 * @wcqe: pointer to the complete wcqe 10663 * 10664 * This routine transfers the fields of a command iocbq to a response iocbq 10665 * by copying all the IOCB fields from command iocbq and transferring the 10666 * completion status information from the complete wcqe. 
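 *
 * Illustrative caller pattern (a sketch of how the fast-path FCP handler
 * below uses this routine, not an additional interface): once the WCQE has
 * been matched to its command iocbq, a temporary response iocbq is filled
 * in and handed to the normal iocb completion callback:
 *
 *	struct lpfc_iocbq irspiocbq;
 *
 *	lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
 *	if (cmdiocbq->iocb_cmpl)
 *		(cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);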
10667 **/ 10668 static void 10669 lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba, 10670 struct lpfc_iocbq *pIocbIn, 10671 struct lpfc_iocbq *pIocbOut, 10672 struct lpfc_wcqe_complete *wcqe) 10673 { 10674 unsigned long iflags; 10675 uint32_t status; 10676 size_t offset = offsetof(struct lpfc_iocbq, iocb); 10677 10678 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset, 10679 sizeof(struct lpfc_iocbq) - offset); 10680 /* Map WCQE parameters into irspiocb parameters */ 10681 status = bf_get(lpfc_wcqe_c_status, wcqe); 10682 pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK); 10683 if (pIocbOut->iocb_flag & LPFC_IO_FCP) 10684 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR) 10685 pIocbIn->iocb.un.fcpi.fcpi_parm = 10686 pIocbOut->iocb.un.fcpi.fcpi_parm - 10687 wcqe->total_data_placed; 10688 else 10689 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; 10690 else { 10691 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; 10692 pIocbIn->iocb.un.genreq64.bdl.bdeSize = wcqe->total_data_placed; 10693 } 10694 10695 /* Convert BG errors for completion status */ 10696 if (status == CQE_STATUS_DI_ERROR) { 10697 pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT; 10698 10699 if (bf_get(lpfc_wcqe_c_bg_edir, wcqe)) 10700 pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED; 10701 else 10702 pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED; 10703 10704 pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0; 10705 if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */ 10706 pIocbIn->iocb.unsli3.sli3_bg.bgstat |= 10707 BGS_GUARD_ERR_MASK; 10708 if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */ 10709 pIocbIn->iocb.unsli3.sli3_bg.bgstat |= 10710 BGS_APPTAG_ERR_MASK; 10711 if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */ 10712 pIocbIn->iocb.unsli3.sli3_bg.bgstat |= 10713 BGS_REFTAG_ERR_MASK; 10714 10715 /* Check to see if there was any good data before the error */ 10716 if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) { 10717 pIocbIn->iocb.unsli3.sli3_bg.bgstat |= 10718 BGS_HI_WATER_MARK_PRESENT_MASK; 10719 pIocbIn->iocb.unsli3.sli3_bg.bghm = 10720 wcqe->total_data_placed; 10721 } 10722 10723 /* 10724 * Set ALL the error bits to indicate we don't know what 10725 * type of error it is. 10726 */ 10727 if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat) 10728 pIocbIn->iocb.unsli3.sli3_bg.bgstat |= 10729 (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK | 10730 BGS_GUARD_ERR_MASK); 10731 } 10732 10733 /* Pick up HBA exchange busy condition */ 10734 if (bf_get(lpfc_wcqe_c_xb, wcqe)) { 10735 spin_lock_irqsave(&phba->hbalock, iflags); 10736 pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY; 10737 spin_unlock_irqrestore(&phba->hbalock, iflags); 10738 } 10739 } 10740 10741 /** 10742 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe 10743 * @phba: Pointer to HBA context object. 10744 * @wcqe: Pointer to work-queue completion queue entry. 10745 * 10746 * This routine handles an ELS work-queue completion event and construct 10747 * a pseudo response ELS IODBQ from the SLI4 ELS WCQE for the common 10748 * discovery engine to handle. 10749 * 10750 * Return: Pointer to the receive IOCBQ, NULL otherwise. 
10751 **/ 10752 static struct lpfc_iocbq * 10753 lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba, 10754 struct lpfc_iocbq *irspiocbq) 10755 { 10756 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 10757 struct lpfc_iocbq *cmdiocbq; 10758 struct lpfc_wcqe_complete *wcqe; 10759 unsigned long iflags; 10760 10761 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl; 10762 spin_lock_irqsave(&phba->hbalock, iflags); 10763 pring->stats.iocb_event++; 10764 /* Look up the ELS command IOCB and create pseudo response IOCB */ 10765 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, 10766 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 10767 spin_unlock_irqrestore(&phba->hbalock, iflags); 10768 10769 if (unlikely(!cmdiocbq)) { 10770 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 10771 "0386 ELS complete with no corresponding " 10772 "cmdiocb: iotag (%d)\n", 10773 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 10774 lpfc_sli_release_iocbq(phba, irspiocbq); 10775 return NULL; 10776 } 10777 10778 /* Fake the irspiocbq and copy necessary response information */ 10779 lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe); 10780 10781 return irspiocbq; 10782 } 10783 10784 /** 10785 * lpfc_sli4_sp_handle_async_event - Handle an asynchroous event 10786 * @phba: Pointer to HBA context object. 10787 * @cqe: Pointer to mailbox completion queue entry. 10788 * 10789 * This routine process a mailbox completion queue entry with asynchrous 10790 * event. 10791 * 10792 * Return: true if work posted to worker thread, otherwise false. 10793 **/ 10794 static bool 10795 lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe) 10796 { 10797 struct lpfc_cq_event *cq_event; 10798 unsigned long iflags; 10799 10800 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 10801 "0392 Async Event: word0:x%x, word1:x%x, " 10802 "word2:x%x, word3:x%x\n", mcqe->word0, 10803 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer); 10804 10805 /* Allocate a new internal CQ_EVENT entry */ 10806 cq_event = lpfc_sli4_cq_event_alloc(phba); 10807 if (!cq_event) { 10808 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 10809 "0394 Failed to allocate CQ_EVENT entry\n"); 10810 return false; 10811 } 10812 10813 /* Move the CQE into an asynchronous event entry */ 10814 memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe)); 10815 spin_lock_irqsave(&phba->hbalock, iflags); 10816 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue); 10817 /* Set the async event flag */ 10818 phba->hba_flag |= ASYNC_EVENT; 10819 spin_unlock_irqrestore(&phba->hbalock, iflags); 10820 10821 return true; 10822 } 10823 10824 /** 10825 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event 10826 * @phba: Pointer to HBA context object. 10827 * @cqe: Pointer to mailbox completion queue entry. 10828 * 10829 * This routine process a mailbox completion queue entry with mailbox 10830 * completion event. 10831 * 10832 * Return: true if work posted to worker thread, otherwise false. 
10833 **/ 10834 static bool 10835 lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe) 10836 { 10837 uint32_t mcqe_status; 10838 MAILBOX_t *mbox, *pmbox; 10839 struct lpfc_mqe *mqe; 10840 struct lpfc_vport *vport; 10841 struct lpfc_nodelist *ndlp; 10842 struct lpfc_dmabuf *mp; 10843 unsigned long iflags; 10844 LPFC_MBOXQ_t *pmb; 10845 bool workposted = false; 10846 int rc; 10847 10848 /* If not a mailbox complete MCQE, out by checking mailbox consume */ 10849 if (!bf_get(lpfc_trailer_completed, mcqe)) 10850 goto out_no_mqe_complete; 10851 10852 /* Get the reference to the active mbox command */ 10853 spin_lock_irqsave(&phba->hbalock, iflags); 10854 pmb = phba->sli.mbox_active; 10855 if (unlikely(!pmb)) { 10856 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 10857 "1832 No pending MBOX command to handle\n"); 10858 spin_unlock_irqrestore(&phba->hbalock, iflags); 10859 goto out_no_mqe_complete; 10860 } 10861 spin_unlock_irqrestore(&phba->hbalock, iflags); 10862 mqe = &pmb->u.mqe; 10863 pmbox = (MAILBOX_t *)&pmb->u.mqe; 10864 mbox = phba->mbox; 10865 vport = pmb->vport; 10866 10867 /* Reset heartbeat timer */ 10868 phba->last_completion_time = jiffies; 10869 del_timer(&phba->sli.mbox_tmo); 10870 10871 /* Move mbox data to caller's mailbox region, do endian swapping */ 10872 if (pmb->mbox_cmpl && mbox) 10873 lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe)); 10874 10875 /* 10876 * For mcqe errors, conditionally move a modified error code to 10877 * the mbox so that the error will not be missed. 10878 */ 10879 mcqe_status = bf_get(lpfc_mcqe_status, mcqe); 10880 if (mcqe_status != MB_CQE_STATUS_SUCCESS) { 10881 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS) 10882 bf_set(lpfc_mqe_status, mqe, 10883 (LPFC_MBX_ERROR_RANGE | mcqe_status)); 10884 } 10885 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) { 10886 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG; 10887 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT, 10888 "MBOX dflt rpi: status:x%x rpi:x%x", 10889 mcqe_status, 10890 pmbox->un.varWords[0], 0); 10891 if (mcqe_status == MB_CQE_STATUS_SUCCESS) { 10892 mp = (struct lpfc_dmabuf *)(pmb->context1); 10893 ndlp = (struct lpfc_nodelist *)pmb->context2; 10894 /* Reg_LOGIN of dflt RPI was successful. Now lets get 10895 * RID of the PPI using the same mbox buffer. 
10896 */ 10897 lpfc_unreg_login(phba, vport->vpi, 10898 pmbox->un.varWords[0], pmb); 10899 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; 10900 pmb->context1 = mp; 10901 pmb->context2 = ndlp; 10902 pmb->vport = vport; 10903 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 10904 if (rc != MBX_BUSY) 10905 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 10906 LOG_SLI, "0385 rc should " 10907 "have been MBX_BUSY\n"); 10908 if (rc != MBX_NOT_FINISHED) 10909 goto send_current_mbox; 10910 } 10911 } 10912 spin_lock_irqsave(&phba->pport->work_port_lock, iflags); 10913 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 10914 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags); 10915 10916 /* There is mailbox completion work to do */ 10917 spin_lock_irqsave(&phba->hbalock, iflags); 10918 __lpfc_mbox_cmpl_put(phba, pmb); 10919 phba->work_ha |= HA_MBATT; 10920 spin_unlock_irqrestore(&phba->hbalock, iflags); 10921 workposted = true; 10922 10923 send_current_mbox: 10924 spin_lock_irqsave(&phba->hbalock, iflags); 10925 /* Release the mailbox command posting token */ 10926 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 10927 /* Setting active mailbox pointer need to be in sync to flag clear */ 10928 phba->sli.mbox_active = NULL; 10929 spin_unlock_irqrestore(&phba->hbalock, iflags); 10930 /* Wake up worker thread to post the next pending mailbox command */ 10931 lpfc_worker_wake_up(phba); 10932 out_no_mqe_complete: 10933 if (bf_get(lpfc_trailer_consumed, mcqe)) 10934 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq); 10935 return workposted; 10936 } 10937 10938 /** 10939 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry 10940 * @phba: Pointer to HBA context object. 10941 * @cqe: Pointer to mailbox completion queue entry. 10942 * 10943 * This routine process a mailbox completion queue entry, it invokes the 10944 * proper mailbox complete handling or asynchrous event handling routine 10945 * according to the MCQE's async bit. 10946 * 10947 * Return: true if work posted to worker thread, otherwise false. 10948 **/ 10949 static bool 10950 lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe) 10951 { 10952 struct lpfc_mcqe mcqe; 10953 bool workposted; 10954 10955 /* Copy the mailbox MCQE and convert endian order as needed */ 10956 lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe)); 10957 10958 /* Invoke the proper event handling routine */ 10959 if (!bf_get(lpfc_trailer_async, &mcqe)) 10960 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe); 10961 else 10962 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe); 10963 return workposted; 10964 } 10965 10966 /** 10967 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event 10968 * @phba: Pointer to HBA context object. 10969 * @wcqe: Pointer to work-queue completion queue entry. 10970 * 10971 * This routine handles an ELS work-queue completion event. 10972 * 10973 * Return: true if work posted to worker thread, otherwise false. 
10974 **/ 10975 static bool 10976 lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, 10977 struct lpfc_wcqe_complete *wcqe) 10978 { 10979 struct lpfc_iocbq *irspiocbq; 10980 unsigned long iflags; 10981 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING]; 10982 10983 /* Get an irspiocbq for later ELS response processing use */ 10984 irspiocbq = lpfc_sli_get_iocbq(phba); 10985 if (!irspiocbq) { 10986 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 10987 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d " 10988 "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n", 10989 pring->txq_cnt, phba->iocb_cnt, 10990 phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt, 10991 phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt); 10992 return false; 10993 } 10994 10995 /* Save off the slow-path queue event for work thread to process */ 10996 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe)); 10997 spin_lock_irqsave(&phba->hbalock, iflags); 10998 list_add_tail(&irspiocbq->cq_event.list, 10999 &phba->sli4_hba.sp_queue_event); 11000 phba->hba_flag |= HBA_SP_QUEUE_EVT; 11001 spin_unlock_irqrestore(&phba->hbalock, iflags); 11002 11003 return true; 11004 } 11005 11006 /** 11007 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event 11008 * @phba: Pointer to HBA context object. 11009 * @wcqe: Pointer to work-queue completion queue entry. 11010 * 11011 * This routine handles slow-path WQ entry comsumed event by invoking the 11012 * proper WQ release routine to the slow-path WQ. 11013 **/ 11014 static void 11015 lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba, 11016 struct lpfc_wcqe_release *wcqe) 11017 { 11018 /* sanity check on queue memory */ 11019 if (unlikely(!phba->sli4_hba.els_wq)) 11020 return; 11021 /* Check for the slow-path ELS work queue */ 11022 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id) 11023 lpfc_sli4_wq_release(phba->sli4_hba.els_wq, 11024 bf_get(lpfc_wcqe_r_wqe_index, wcqe)); 11025 else 11026 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 11027 "2579 Slow-path wqe consume event carries " 11028 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n", 11029 bf_get(lpfc_wcqe_r_wqe_index, wcqe), 11030 phba->sli4_hba.els_wq->queue_id); 11031 } 11032 11033 /** 11034 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event 11035 * @phba: Pointer to HBA context object. 11036 * @cq: Pointer to a WQ completion queue. 11037 * @wcqe: Pointer to work-queue completion queue entry. 11038 * 11039 * This routine handles an XRI abort event. 11040 * 11041 * Return: true if work posted to worker thread, otherwise false. 
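 *
 * Events queued here are drained later by the worker thread: FCP subtype
 * entries are placed on sp_fcp_xri_aborted_work_queue and handled by
 * lpfc_sli4_fcp_xri_abort_event_proc(), while ELS subtype entries are placed
 * on sp_els_xri_aborted_work_queue and handled by
 * lpfc_sli4_els_xri_abort_event_proc().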
11042 **/ 11043 static bool 11044 lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba, 11045 struct lpfc_queue *cq, 11046 struct sli4_wcqe_xri_aborted *wcqe) 11047 { 11048 bool workposted = false; 11049 struct lpfc_cq_event *cq_event; 11050 unsigned long iflags; 11051 11052 /* Allocate a new internal CQ_EVENT entry */ 11053 cq_event = lpfc_sli4_cq_event_alloc(phba); 11054 if (!cq_event) { 11055 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11056 "0602 Failed to allocate CQ_EVENT entry\n"); 11057 return false; 11058 } 11059 11060 /* Move the CQE into the proper xri abort event list */ 11061 memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted)); 11062 switch (cq->subtype) { 11063 case LPFC_FCP: 11064 spin_lock_irqsave(&phba->hbalock, iflags); 11065 list_add_tail(&cq_event->list, 11066 &phba->sli4_hba.sp_fcp_xri_aborted_work_queue); 11067 /* Set the fcp xri abort event flag */ 11068 phba->hba_flag |= FCP_XRI_ABORT_EVENT; 11069 spin_unlock_irqrestore(&phba->hbalock, iflags); 11070 workposted = true; 11071 break; 11072 case LPFC_ELS: 11073 spin_lock_irqsave(&phba->hbalock, iflags); 11074 list_add_tail(&cq_event->list, 11075 &phba->sli4_hba.sp_els_xri_aborted_work_queue); 11076 /* Set the els xri abort event flag */ 11077 phba->hba_flag |= ELS_XRI_ABORT_EVENT; 11078 spin_unlock_irqrestore(&phba->hbalock, iflags); 11079 workposted = true; 11080 break; 11081 default: 11082 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11083 "0603 Invalid work queue CQE subtype (x%x)\n", 11084 cq->subtype); 11085 workposted = false; 11086 break; 11087 } 11088 return workposted; 11089 } 11090 11091 /** 11092 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry 11093 * @phba: Pointer to HBA context object. 11094 * @rcqe: Pointer to receive-queue completion queue entry. 11095 * 11096 * This routine process a receive-queue completion queue entry. 11097 * 11098 * Return: true if work posted to worker thread, otherwise false. 
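 *
 * On FC_STATUS_RQ_SUCCESS the received frame's hbq_dmabuf is queued on
 * sp_queue_event with HBA_SP_QUEUE_EVT set for the worker thread; on the
 * insufficient-buffer statuses HBA_POST_RECEIVE_BUFFER is set so that the
 * worker thread reposts receive buffers.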
11099 **/ 11100 static bool 11101 lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) 11102 { 11103 bool workposted = false; 11104 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq; 11105 struct lpfc_queue *drq = phba->sli4_hba.dat_rq; 11106 struct hbq_dmabuf *dma_buf; 11107 uint32_t status, rq_id; 11108 unsigned long iflags; 11109 11110 /* sanity check on queue memory */ 11111 if (unlikely(!hrq) || unlikely(!drq)) 11112 return workposted; 11113 11114 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1) 11115 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe); 11116 else 11117 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe); 11118 if (rq_id != hrq->queue_id) 11119 goto out; 11120 11121 status = bf_get(lpfc_rcqe_status, rcqe); 11122 switch (status) { 11123 case FC_STATUS_RQ_BUF_LEN_EXCEEDED: 11124 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11125 "2537 Receive Frame Truncated!!\n"); 11126 case FC_STATUS_RQ_SUCCESS: 11127 lpfc_sli4_rq_release(hrq, drq); 11128 spin_lock_irqsave(&phba->hbalock, iflags); 11129 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list); 11130 if (!dma_buf) { 11131 spin_unlock_irqrestore(&phba->hbalock, iflags); 11132 goto out; 11133 } 11134 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe)); 11135 /* save off the frame for the word thread to process */ 11136 list_add_tail(&dma_buf->cq_event.list, 11137 &phba->sli4_hba.sp_queue_event); 11138 /* Frame received */ 11139 phba->hba_flag |= HBA_SP_QUEUE_EVT; 11140 spin_unlock_irqrestore(&phba->hbalock, iflags); 11141 workposted = true; 11142 break; 11143 case FC_STATUS_INSUFF_BUF_NEED_BUF: 11144 case FC_STATUS_INSUFF_BUF_FRM_DISC: 11145 /* Post more buffers if possible */ 11146 spin_lock_irqsave(&phba->hbalock, iflags); 11147 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER; 11148 spin_unlock_irqrestore(&phba->hbalock, iflags); 11149 workposted = true; 11150 break; 11151 } 11152 out: 11153 return workposted; 11154 } 11155 11156 /** 11157 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry 11158 * @phba: Pointer to HBA context object. 11159 * @cq: Pointer to the completion queue. 11160 * @wcqe: Pointer to a completion queue entry. 11161 * 11162 * This routine process a slow-path work-queue or receive queue completion queue 11163 * entry. 11164 * 11165 * Return: true if work posted to worker thread, otherwise false. 
11166 **/ 11167 static bool 11168 lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 11169 struct lpfc_cqe *cqe) 11170 { 11171 struct lpfc_cqe cqevt; 11172 bool workposted = false; 11173 11174 /* Copy the work queue CQE and convert endian order if needed */ 11175 lpfc_sli_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe)); 11176 11177 /* Check and process for different type of WCQE and dispatch */ 11178 switch (bf_get(lpfc_cqe_code, &cqevt)) { 11179 case CQE_CODE_COMPL_WQE: 11180 /* Process the WQ/RQ complete event */ 11181 phba->last_completion_time = jiffies; 11182 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, 11183 (struct lpfc_wcqe_complete *)&cqevt); 11184 break; 11185 case CQE_CODE_RELEASE_WQE: 11186 /* Process the WQ release event */ 11187 lpfc_sli4_sp_handle_rel_wcqe(phba, 11188 (struct lpfc_wcqe_release *)&cqevt); 11189 break; 11190 case CQE_CODE_XRI_ABORTED: 11191 /* Process the WQ XRI abort event */ 11192 phba->last_completion_time = jiffies; 11193 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq, 11194 (struct sli4_wcqe_xri_aborted *)&cqevt); 11195 break; 11196 case CQE_CODE_RECEIVE: 11197 case CQE_CODE_RECEIVE_V1: 11198 /* Process the RQ event */ 11199 phba->last_completion_time = jiffies; 11200 workposted = lpfc_sli4_sp_handle_rcqe(phba, 11201 (struct lpfc_rcqe *)&cqevt); 11202 break; 11203 default: 11204 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11205 "0388 Not a valid WCQE code: x%x\n", 11206 bf_get(lpfc_cqe_code, &cqevt)); 11207 break; 11208 } 11209 return workposted; 11210 } 11211 11212 /** 11213 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry 11214 * @phba: Pointer to HBA context object. 11215 * @eqe: Pointer to fast-path event queue entry. 11216 * 11217 * This routine process a event queue entry from the slow-path event queue. 11218 * It will check the MajorCode and MinorCode to determine this is for a 11219 * completion event on a completion queue, if not, an error shall be logged 11220 * and just return. Otherwise, it will get to the corresponding completion 11221 * queue and process all the entries on that completion queue, rearm the 11222 * completion queue, and then return. 
11223 * 11224 **/ 11225 static void 11226 lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe) 11227 { 11228 struct lpfc_queue *cq = NULL, *childq, *speq; 11229 struct lpfc_cqe *cqe; 11230 bool workposted = false; 11231 int ecount = 0; 11232 uint16_t cqid; 11233 11234 if (bf_get_le32(lpfc_eqe_major_code, eqe) != 0) { 11235 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11236 "0359 Not a valid slow-path completion " 11237 "event: majorcode=x%x, minorcode=x%x\n", 11238 bf_get_le32(lpfc_eqe_major_code, eqe), 11239 bf_get_le32(lpfc_eqe_minor_code, eqe)); 11240 return; 11241 } 11242 11243 /* Get the reference to the corresponding CQ */ 11244 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); 11245 11246 /* Search for completion queue pointer matching this cqid */ 11247 speq = phba->sli4_hba.sp_eq; 11248 /* sanity check on queue memory */ 11249 if (unlikely(!speq)) 11250 return; 11251 list_for_each_entry(childq, &speq->child_list, list) { 11252 if (childq->queue_id == cqid) { 11253 cq = childq; 11254 break; 11255 } 11256 } 11257 if (unlikely(!cq)) { 11258 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) 11259 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11260 "0365 Slow-path CQ identifier " 11261 "(%d) does not exist\n", cqid); 11262 return; 11263 } 11264 11265 /* Process all the entries to the CQ */ 11266 switch (cq->type) { 11267 case LPFC_MCQ: 11268 while ((cqe = lpfc_sli4_cq_get(cq))) { 11269 workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe); 11270 if (!(++ecount % cq->entry_repost)) 11271 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); 11272 } 11273 break; 11274 case LPFC_WCQ: 11275 while ((cqe = lpfc_sli4_cq_get(cq))) { 11276 if (cq->subtype == LPFC_FCP) 11277 workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, 11278 cqe); 11279 else 11280 workposted |= lpfc_sli4_sp_handle_cqe(phba, cq, 11281 cqe); 11282 if (!(++ecount % cq->entry_repost)) 11283 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); 11284 } 11285 break; 11286 default: 11287 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11288 "0370 Invalid completion queue type (%d)\n", 11289 cq->type); 11290 return; 11291 } 11292 11293 /* Catch the no cq entry condition, log an error */ 11294 if (unlikely(ecount == 0)) 11295 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11296 "0371 No entry from the CQ: identifier " 11297 "(x%x), type (%d)\n", cq->queue_id, cq->type); 11298 11299 /* In any case, flash and re-arm the RCQ */ 11300 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM); 11301 11302 /* wake up worker thread if there are works to be done */ 11303 if (workposted) 11304 lpfc_worker_wake_up(phba); 11305 } 11306 11307 /** 11308 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry 11309 * @eqe: Pointer to fast-path completion queue entry. 11310 * 11311 * This routine process a fast-path work queue completion entry from fast-path 11312 * event queue for FCP command response completion. 11313 **/ 11314 static void 11315 lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, 11316 struct lpfc_wcqe_complete *wcqe) 11317 { 11318 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING]; 11319 struct lpfc_iocbq *cmdiocbq; 11320 struct lpfc_iocbq irspiocbq; 11321 unsigned long iflags; 11322 11323 spin_lock_irqsave(&phba->hbalock, iflags); 11324 pring->stats.iocb_event++; 11325 spin_unlock_irqrestore(&phba->hbalock, iflags); 11326 11327 /* Check for response status */ 11328 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) { 11329 /* If resource errors reported from HBA, reduce queue 11330 * depth of the SCSI device. 
11331 */ 11332 if ((bf_get(lpfc_wcqe_c_status, wcqe) == 11333 IOSTAT_LOCAL_REJECT) && 11334 (wcqe->parameter == IOERR_NO_RESOURCES)) { 11335 phba->lpfc_rampdown_queue_depth(phba); 11336 } 11337 /* Log the error status */ 11338 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 11339 "0373 FCP complete error: status=x%x, " 11340 "hw_status=x%x, total_data_specified=%d, " 11341 "parameter=x%x, word3=x%x\n", 11342 bf_get(lpfc_wcqe_c_status, wcqe), 11343 bf_get(lpfc_wcqe_c_hw_status, wcqe), 11344 wcqe->total_data_placed, wcqe->parameter, 11345 wcqe->word3); 11346 } 11347 11348 /* Look up the FCP command IOCB and create pseudo response IOCB */ 11349 spin_lock_irqsave(&phba->hbalock, iflags); 11350 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, 11351 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 11352 spin_unlock_irqrestore(&phba->hbalock, iflags); 11353 if (unlikely(!cmdiocbq)) { 11354 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 11355 "0374 FCP complete with no corresponding " 11356 "cmdiocb: iotag (%d)\n", 11357 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 11358 return; 11359 } 11360 if (unlikely(!cmdiocbq->iocb_cmpl)) { 11361 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 11362 "0375 FCP cmdiocb not callback function " 11363 "iotag: (%d)\n", 11364 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 11365 return; 11366 } 11367 11368 /* Fake the irspiocb and copy necessary response information */ 11369 lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe); 11370 11371 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) { 11372 spin_lock_irqsave(&phba->hbalock, iflags); 11373 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED; 11374 spin_unlock_irqrestore(&phba->hbalock, iflags); 11375 } 11376 11377 /* Pass the cmd_iocb and the rsp state to the upper layer */ 11378 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq); 11379 } 11380 11381 /** 11382 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event 11383 * @phba: Pointer to HBA context object. 11384 * @cq: Pointer to completion queue. 11385 * @wcqe: Pointer to work-queue completion queue entry. 11386 * 11387 * This routine handles an fast-path WQ entry comsumed event by invoking the 11388 * proper WQ release routine to the slow-path WQ. 11389 **/ 11390 static void 11391 lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 11392 struct lpfc_wcqe_release *wcqe) 11393 { 11394 struct lpfc_queue *childwq; 11395 bool wqid_matched = false; 11396 uint16_t fcp_wqid; 11397 11398 /* Check for fast-path FCP work queue release */ 11399 fcp_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe); 11400 list_for_each_entry(childwq, &cq->child_list, list) { 11401 if (childwq->queue_id == fcp_wqid) { 11402 lpfc_sli4_wq_release(childwq, 11403 bf_get(lpfc_wcqe_r_wqe_index, wcqe)); 11404 wqid_matched = true; 11405 break; 11406 } 11407 } 11408 /* Report warning log message if no match found */ 11409 if (wqid_matched != true) 11410 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 11411 "2580 Fast-path wqe consume event carries " 11412 "miss-matched qid: wcqe-qid=x%x\n", fcp_wqid); 11413 } 11414 11415 /** 11416 * lpfc_sli4_fp_handle_wcqe - Process fast-path work queue completion entry 11417 * @cq: Pointer to the completion queue. 11418 * @eqe: Pointer to fast-path completion queue entry. 11419 * 11420 * This routine process a fast-path work queue completion entry from fast-path 11421 * event queue for FCP command response completion. 
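 *
 * Besides the fast-path CQ path, this routine is also reached from
 * lpfc_sli4_sp_handle_eqe() when a slow-path completion queue carries the
 * LPFC_FCP subtype, so FCP completions are decoded the same way on either
 * path.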
11422 **/ 11423 static int 11424 lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 11425 struct lpfc_cqe *cqe) 11426 { 11427 struct lpfc_wcqe_release wcqe; 11428 bool workposted = false; 11429 11430 /* Copy the work queue CQE and convert endian order if needed */ 11431 lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe)); 11432 11433 /* Check and process for different type of WCQE and dispatch */ 11434 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) { 11435 case CQE_CODE_COMPL_WQE: 11436 /* Process the WQ complete event */ 11437 phba->last_completion_time = jiffies; 11438 lpfc_sli4_fp_handle_fcp_wcqe(phba, 11439 (struct lpfc_wcqe_complete *)&wcqe); 11440 break; 11441 case CQE_CODE_RELEASE_WQE: 11442 /* Process the WQ release event */ 11443 lpfc_sli4_fp_handle_rel_wcqe(phba, cq, 11444 (struct lpfc_wcqe_release *)&wcqe); 11445 break; 11446 case CQE_CODE_XRI_ABORTED: 11447 /* Process the WQ XRI abort event */ 11448 phba->last_completion_time = jiffies; 11449 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq, 11450 (struct sli4_wcqe_xri_aborted *)&wcqe); 11451 break; 11452 default: 11453 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11454 "0144 Not a valid WCQE code: x%x\n", 11455 bf_get(lpfc_wcqe_c_code, &wcqe)); 11456 break; 11457 } 11458 return workposted; 11459 } 11460 11461 /** 11462 * lpfc_sli4_fp_handle_eqe - Process a fast-path event queue entry 11463 * @phba: Pointer to HBA context object. 11464 * @eqe: Pointer to fast-path event queue entry. 11465 * 11466 * This routine process a event queue entry from the fast-path event queue. 11467 * It will check the MajorCode and MinorCode to determine this is for a 11468 * completion event on a completion queue, if not, an error shall be logged 11469 * and just return. Otherwise, it will get to the corresponding completion 11470 * queue and process all the entries on the completion queue, rearm the 11471 * completion queue, and then return. 
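 *
 * The FCP EQs and FCP CQs are mapped one-to-one, so @fcp_cqidx (the index of
 * the EQ that raised this entry) is also used to index fcp_cq[]; the CQ id
 * carried in the EQE is only cross-checked against that queue.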
11472 **/ 11473 static void 11474 lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, 11475 uint32_t fcp_cqidx) 11476 { 11477 struct lpfc_queue *cq; 11478 struct lpfc_cqe *cqe; 11479 bool workposted = false; 11480 uint16_t cqid; 11481 int ecount = 0; 11482 11483 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) { 11484 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11485 "0366 Not a valid fast-path completion " 11486 "event: majorcode=x%x, minorcode=x%x\n", 11487 bf_get_le32(lpfc_eqe_major_code, eqe), 11488 bf_get_le32(lpfc_eqe_minor_code, eqe)); 11489 return; 11490 } 11491 11492 if (unlikely(!phba->sli4_hba.fcp_cq)) { 11493 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 11494 "3146 Fast-path completion queues " 11495 "does not exist\n"); 11496 return; 11497 } 11498 cq = phba->sli4_hba.fcp_cq[fcp_cqidx]; 11499 if (unlikely(!cq)) { 11500 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) 11501 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11502 "0367 Fast-path completion queue " 11503 "(%d) does not exist\n", fcp_cqidx); 11504 return; 11505 } 11506 11507 /* Get the reference to the corresponding CQ */ 11508 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); 11509 if (unlikely(cqid != cq->queue_id)) { 11510 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11511 "0368 Miss-matched fast-path completion " 11512 "queue identifier: eqcqid=%d, fcpcqid=%d\n", 11513 cqid, cq->queue_id); 11514 return; 11515 } 11516 11517 /* Process all the entries to the CQ */ 11518 while ((cqe = lpfc_sli4_cq_get(cq))) { 11519 workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe); 11520 if (!(++ecount % cq->entry_repost)) 11521 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); 11522 } 11523 11524 /* Catch the no cq entry condition */ 11525 if (unlikely(ecount == 0)) 11526 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11527 "0369 No entry from fast-path completion " 11528 "queue fcpcqid=%d\n", cq->queue_id); 11529 11530 /* In any case, flash and re-arm the CQ */ 11531 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM); 11532 11533 /* wake up worker thread if there are works to be done */ 11534 if (workposted) 11535 lpfc_worker_wake_up(phba); 11536 } 11537 11538 static void 11539 lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq) 11540 { 11541 struct lpfc_eqe *eqe; 11542 11543 /* walk all the EQ entries and drop on the floor */ 11544 while ((eqe = lpfc_sli4_eq_get(eq))) 11545 ; 11546 11547 /* Clear and re-arm the EQ */ 11548 lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM); 11549 } 11550 11551 /** 11552 * lpfc_sli4_sp_intr_handler - Slow-path interrupt handler to SLI-4 device 11553 * @irq: Interrupt number. 11554 * @dev_id: The device context pointer. 11555 * 11556 * This function is directly called from the PCI layer as an interrupt 11557 * service routine when device with SLI-4 interface spec is enabled with 11558 * MSI-X multi-message interrupt mode and there are slow-path events in 11559 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ 11560 * interrupt mode, this function is called as part of the device-level 11561 * interrupt handler. When the PCI slot is in error recovery or the HBA is 11562 * undergoing initialization, the interrupt handler will not process the 11563 * interrupt. The link attention and ELS ring attention events are handled 11564 * by the worker thread. The interrupt handler signals the worker thread 11565 * and returns for these events. This function is called without any lock 11566 * held. It gets the hbalock to access and update SLI data structures. 
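 *
 * Unlike the fast-path handler, the @dev_id registered for this vector is
 * the lpfc_hba pointer itself; the handler simply casts it back and walks
 * the slow-path EQ (sp_eq).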
11567 * 11568 * This function returns IRQ_HANDLED when interrupt is handled else it 11569 * returns IRQ_NONE. 11570 **/ 11571 irqreturn_t 11572 lpfc_sli4_sp_intr_handler(int irq, void *dev_id) 11573 { 11574 struct lpfc_hba *phba; 11575 struct lpfc_queue *speq; 11576 struct lpfc_eqe *eqe; 11577 unsigned long iflag; 11578 int ecount = 0; 11579 11580 /* 11581 * Get the driver's phba structure from the dev_id 11582 */ 11583 phba = (struct lpfc_hba *)dev_id; 11584 11585 if (unlikely(!phba)) 11586 return IRQ_NONE; 11587 11588 /* Get to the EQ struct associated with this vector */ 11589 speq = phba->sli4_hba.sp_eq; 11590 if (unlikely(!speq)) 11591 return IRQ_NONE; 11592 11593 /* Check device state for handling interrupt */ 11594 if (unlikely(lpfc_intr_state_check(phba))) { 11595 /* Check again for link_state with lock held */ 11596 spin_lock_irqsave(&phba->hbalock, iflag); 11597 if (phba->link_state < LPFC_LINK_DOWN) 11598 /* Flush, clear interrupt, and rearm the EQ */ 11599 lpfc_sli4_eq_flush(phba, speq); 11600 spin_unlock_irqrestore(&phba->hbalock, iflag); 11601 return IRQ_NONE; 11602 } 11603 11604 /* 11605 * Process all the event on FCP slow-path EQ 11606 */ 11607 while ((eqe = lpfc_sli4_eq_get(speq))) { 11608 lpfc_sli4_sp_handle_eqe(phba, eqe); 11609 if (!(++ecount % speq->entry_repost)) 11610 lpfc_sli4_eq_release(speq, LPFC_QUEUE_NOARM); 11611 } 11612 11613 /* Always clear and re-arm the slow-path EQ */ 11614 lpfc_sli4_eq_release(speq, LPFC_QUEUE_REARM); 11615 11616 /* Catch the no cq entry condition */ 11617 if (unlikely(ecount == 0)) { 11618 if (phba->intr_type == MSIX) 11619 /* MSI-X treated interrupt served as no EQ share INT */ 11620 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 11621 "0357 MSI-X interrupt with no EQE\n"); 11622 else 11623 /* Non MSI-X treated on interrupt as EQ share INT */ 11624 return IRQ_NONE; 11625 } 11626 11627 return IRQ_HANDLED; 11628 } /* lpfc_sli4_sp_intr_handler */ 11629 11630 /** 11631 * lpfc_sli4_fp_intr_handler - Fast-path interrupt handler to SLI-4 device 11632 * @irq: Interrupt number. 11633 * @dev_id: The device context pointer. 11634 * 11635 * This function is directly called from the PCI layer as an interrupt 11636 * service routine when device with SLI-4 interface spec is enabled with 11637 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB 11638 * ring event in the HBA. However, when the device is enabled with either 11639 * MSI or Pin-IRQ interrupt mode, this function is called as part of the 11640 * device-level interrupt handler. When the PCI slot is in error recovery 11641 * or the HBA is undergoing initialization, the interrupt handler will not 11642 * process the interrupt. The SCSI FCP fast-path ring event are handled in 11643 * the intrrupt context. This function is called without any lock held. 11644 * It gets the hbalock to access and update SLI data structures. Note that, 11645 * the FCP EQ to FCP CQ are one-to-one map such that the FCP EQ index is 11646 * equal to that of FCP CQ index. 11647 * 11648 * This function returns IRQ_HANDLED when interrupt is handled else it 11649 * returns IRQ_NONE. 
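 *
 * A minimal per-vector registration sketch (illustrative only; the real
 * MSI-X wiring is done in the driver's initialization code, and the vector
 * and name below are placeholders). Note that @dev_id must be the per-EQ
 * lpfc_fcp_eq_hdl, not the phba, since this handler recovers both the phba
 * and the EQ index from it:
 *
 *	rc = request_irq(fp_vector, lpfc_sli4_fp_intr_handler, 0,
 *			 "lpfc:fp", &phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]);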
11650 **/ 11651 irqreturn_t 11652 lpfc_sli4_fp_intr_handler(int irq, void *dev_id) 11653 { 11654 struct lpfc_hba *phba; 11655 struct lpfc_fcp_eq_hdl *fcp_eq_hdl; 11656 struct lpfc_queue *fpeq; 11657 struct lpfc_eqe *eqe; 11658 unsigned long iflag; 11659 int ecount = 0; 11660 uint32_t fcp_eqidx; 11661 11662 /* Get the driver's phba structure from the dev_id */ 11663 fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id; 11664 phba = fcp_eq_hdl->phba; 11665 fcp_eqidx = fcp_eq_hdl->idx; 11666 11667 if (unlikely(!phba)) 11668 return IRQ_NONE; 11669 if (unlikely(!phba->sli4_hba.fp_eq)) 11670 return IRQ_NONE; 11671 11672 /* Get to the EQ struct associated with this vector */ 11673 fpeq = phba->sli4_hba.fp_eq[fcp_eqidx]; 11674 if (unlikely(!fpeq)) 11675 return IRQ_NONE; 11676 11677 /* Check device state for handling interrupt */ 11678 if (unlikely(lpfc_intr_state_check(phba))) { 11679 /* Check again for link_state with lock held */ 11680 spin_lock_irqsave(&phba->hbalock, iflag); 11681 if (phba->link_state < LPFC_LINK_DOWN) 11682 /* Flush, clear interrupt, and rearm the EQ */ 11683 lpfc_sli4_eq_flush(phba, fpeq); 11684 spin_unlock_irqrestore(&phba->hbalock, iflag); 11685 return IRQ_NONE; 11686 } 11687 11688 /* 11689 * Process all the event on FCP fast-path EQ 11690 */ 11691 while ((eqe = lpfc_sli4_eq_get(fpeq))) { 11692 lpfc_sli4_fp_handle_eqe(phba, eqe, fcp_eqidx); 11693 if (!(++ecount % fpeq->entry_repost)) 11694 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM); 11695 } 11696 11697 /* Always clear and re-arm the fast-path EQ */ 11698 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM); 11699 11700 if (unlikely(ecount == 0)) { 11701 if (phba->intr_type == MSIX) 11702 /* MSI-X treated interrupt served as no EQ share INT */ 11703 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 11704 "0358 MSI-X interrupt with no EQE\n"); 11705 else 11706 /* Non MSI-X treated on interrupt as EQ share INT */ 11707 return IRQ_NONE; 11708 } 11709 11710 return IRQ_HANDLED; 11711 } /* lpfc_sli4_fp_intr_handler */ 11712 11713 /** 11714 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device 11715 * @irq: Interrupt number. 11716 * @dev_id: The device context pointer. 11717 * 11718 * This function is the device-level interrupt handler to device with SLI-4 11719 * interface spec, called from the PCI layer when either MSI or Pin-IRQ 11720 * interrupt mode is enabled and there is an event in the HBA which requires 11721 * driver attention. This function invokes the slow-path interrupt attention 11722 * handling function and fast-path interrupt attention handling function in 11723 * turn to process the relevant HBA attention events. This function is called 11724 * without any lock held. It gets the hbalock to access and update SLI data 11725 * structures. 11726 * 11727 * This function returns IRQ_HANDLED when interrupt is handled, else it 11728 * returns IRQ_NONE. 11729 **/ 11730 irqreturn_t 11731 lpfc_sli4_intr_handler(int irq, void *dev_id) 11732 { 11733 struct lpfc_hba *phba; 11734 irqreturn_t sp_irq_rc, fp_irq_rc; 11735 bool fp_handled = false; 11736 uint32_t fcp_eqidx; 11737 11738 /* Get the driver's phba structure from the dev_id */ 11739 phba = (struct lpfc_hba *)dev_id; 11740 11741 if (unlikely(!phba)) 11742 return IRQ_NONE; 11743 11744 /* 11745 * Invokes slow-path host attention interrupt handling as appropriate. 11746 */ 11747 sp_irq_rc = lpfc_sli4_sp_intr_handler(irq, dev_id); 11748 11749 /* 11750 * Invoke fast-path host attention interrupt handling as appropriate. 
11751 */ 11752 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { 11753 fp_irq_rc = lpfc_sli4_fp_intr_handler(irq, 11754 &phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]); 11755 if (fp_irq_rc == IRQ_HANDLED) 11756 fp_handled |= true; 11757 } 11758 11759 return (fp_handled == true) ? IRQ_HANDLED : sp_irq_rc; 11760 } /* lpfc_sli4_intr_handler */ 11761 11762 /** 11763 * lpfc_sli4_queue_free - free a queue structure and associated memory 11764 * @queue: The queue structure to free. 11765 * 11766 * This function frees a queue structure and the DMAable memory used for 11767 * the host resident queue. This function must be called after destroying the 11768 * queue on the HBA. 11769 **/ 11770 void 11771 lpfc_sli4_queue_free(struct lpfc_queue *queue) 11772 { 11773 struct lpfc_dmabuf *dmabuf; 11774 11775 if (!queue) 11776 return; 11777 11778 while (!list_empty(&queue->page_list)) { 11779 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf, 11780 list); 11781 dma_free_coherent(&queue->phba->pcidev->dev, SLI4_PAGE_SIZE, 11782 dmabuf->virt, dmabuf->phys); 11783 kfree(dmabuf); 11784 } 11785 kfree(queue); 11786 return; 11787 } 11788 11789 /** 11790 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure 11791 * @phba: The HBA that this queue is being created on. 11792 * @entry_size: The size of each queue entry for this queue. 11793 * @entry count: The number of entries that this queue will handle. 11794 * 11795 * This function allocates a queue structure and the DMAable memory used for 11796 * the host resident queue. This function must be called before creating the 11797 * queue on the HBA. 11798 **/ 11799 struct lpfc_queue * 11800 lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size, 11801 uint32_t entry_count) 11802 { 11803 struct lpfc_queue *queue; 11804 struct lpfc_dmabuf *dmabuf; 11805 int x, total_qe_count; 11806 void *dma_pointer; 11807 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 11808 11809 if (!phba->sli4_hba.pc_sli4_params.supported) 11810 hw_page_size = SLI4_PAGE_SIZE; 11811 11812 queue = kzalloc(sizeof(struct lpfc_queue) + 11813 (sizeof(union sli4_qe) * entry_count), GFP_KERNEL); 11814 if (!queue) 11815 return NULL; 11816 queue->page_count = (ALIGN(entry_size * entry_count, 11817 hw_page_size))/hw_page_size; 11818 INIT_LIST_HEAD(&queue->list); 11819 INIT_LIST_HEAD(&queue->page_list); 11820 INIT_LIST_HEAD(&queue->child_list); 11821 for (x = 0, total_qe_count = 0; x < queue->page_count; x++) { 11822 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 11823 if (!dmabuf) 11824 goto out_fail; 11825 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 11826 hw_page_size, &dmabuf->phys, 11827 GFP_KERNEL); 11828 if (!dmabuf->virt) { 11829 kfree(dmabuf); 11830 goto out_fail; 11831 } 11832 memset(dmabuf->virt, 0, hw_page_size); 11833 dmabuf->buffer_tag = x; 11834 list_add_tail(&dmabuf->list, &queue->page_list); 11835 /* initialize queue's entry array */ 11836 dma_pointer = dmabuf->virt; 11837 for (; total_qe_count < entry_count && 11838 dma_pointer < (hw_page_size + dmabuf->virt); 11839 total_qe_count++, dma_pointer += entry_size) { 11840 queue->qe[total_qe_count].address = dma_pointer; 11841 } 11842 } 11843 queue->entry_size = entry_size; 11844 queue->entry_count = entry_count; 11845 11846 /* 11847 * entry_repost is calculated based on the number of entries in the 11848 * queue. This works out except for RQs. If buffers are NOT initially 11849 * posted for every RQE, entry_repost should be adjusted accordingly. 
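 * For example, a queue created with 512 entries gets entry_repost =
 * 512 >> 3 = 64, so the processing loops release the queue back to the
 * hardware (without re-arming) after every 64 consumed entries; counts small
 * enough that entry_count >> 3 would fall below LPFC_QUEUE_MIN_REPOST are
 * clamped up to that minimum.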
11850 */ 11851 queue->entry_repost = (entry_count >> 3); 11852 if (queue->entry_repost < LPFC_QUEUE_MIN_REPOST) 11853 queue->entry_repost = LPFC_QUEUE_MIN_REPOST; 11854 queue->phba = phba; 11855 11856 return queue; 11857 out_fail: 11858 lpfc_sli4_queue_free(queue); 11859 return NULL; 11860 } 11861 11862 /** 11863 * lpfc_eq_create - Create an Event Queue on the HBA 11864 * @phba: HBA structure that indicates port to create a queue on. 11865 * @eq: The queue structure to use to create the event queue. 11866 * @imax: The maximum interrupt per second limit. 11867 * 11868 * This function creates an event queue, as detailed in @eq, on a port, 11869 * described by @phba by sending an EQ_CREATE mailbox command to the HBA. 11870 * 11871 * The @phba struct is used to send mailbox command to HBA. The @eq struct 11872 * is used to get the entry count and entry size that are necessary to 11873 * determine the number of pages to allocate and use for this queue. This 11874 * function will send the EQ_CREATE mailbox command to the HBA to setup the 11875 * event queue. This function is asynchronous and will wait for the mailbox 11876 * command to finish before continuing. 11877 * 11878 * On success this function will return a zero. If unable to allocate enough 11879 * memory this function will return -ENOMEM. If the queue create mailbox command 11880 * fails this function will return -ENXIO. 11881 **/ 11882 uint32_t 11883 lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax) 11884 { 11885 struct lpfc_mbx_eq_create *eq_create; 11886 LPFC_MBOXQ_t *mbox; 11887 int rc, length, status = 0; 11888 struct lpfc_dmabuf *dmabuf; 11889 uint32_t shdr_status, shdr_add_status; 11890 union lpfc_sli4_cfg_shdr *shdr; 11891 uint16_t dmult; 11892 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 11893 11894 /* sanity check on queue memory */ 11895 if (!eq) 11896 return -ENODEV; 11897 if (!phba->sli4_hba.pc_sli4_params.supported) 11898 hw_page_size = SLI4_PAGE_SIZE; 11899 11900 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 11901 if (!mbox) 11902 return -ENOMEM; 11903 length = (sizeof(struct lpfc_mbx_eq_create) - 11904 sizeof(struct lpfc_sli4_cfg_mhdr)); 11905 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 11906 LPFC_MBOX_OPCODE_EQ_CREATE, 11907 length, LPFC_SLI4_MBX_EMBED); 11908 eq_create = &mbox->u.mqe.un.eq_create; 11909 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request, 11910 eq->page_count); 11911 bf_set(lpfc_eq_context_size, &eq_create->u.request.context, 11912 LPFC_EQE_SIZE); 11913 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1); 11914 /* Calculate delay multiper from maximum interrupt per second */ 11915 dmult = LPFC_DMULT_CONST/imax - 1; 11916 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context, 11917 dmult); 11918 switch (eq->entry_count) { 11919 default: 11920 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11921 "0360 Unsupported EQ count. 
(%d)\n", 11922 eq->entry_count); 11923 if (eq->entry_count < 256) 11924 return -EINVAL; 11925 /* otherwise default to smallest count (drop through) */ 11926 case 256: 11927 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 11928 LPFC_EQ_CNT_256); 11929 break; 11930 case 512: 11931 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 11932 LPFC_EQ_CNT_512); 11933 break; 11934 case 1024: 11935 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 11936 LPFC_EQ_CNT_1024); 11937 break; 11938 case 2048: 11939 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 11940 LPFC_EQ_CNT_2048); 11941 break; 11942 case 4096: 11943 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 11944 LPFC_EQ_CNT_4096); 11945 break; 11946 } 11947 list_for_each_entry(dmabuf, &eq->page_list, list) { 11948 memset(dmabuf->virt, 0, hw_page_size); 11949 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 11950 putPaddrLow(dmabuf->phys); 11951 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 11952 putPaddrHigh(dmabuf->phys); 11953 } 11954 mbox->vport = phba->pport; 11955 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 11956 mbox->context1 = NULL; 11957 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 11958 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr; 11959 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 11960 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 11961 if (shdr_status || shdr_add_status || rc) { 11962 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11963 "2500 EQ_CREATE mailbox failed with " 11964 "status x%x add_status x%x, mbx status x%x\n", 11965 shdr_status, shdr_add_status, rc); 11966 status = -ENXIO; 11967 } 11968 eq->type = LPFC_EQ; 11969 eq->subtype = LPFC_NONE; 11970 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response); 11971 if (eq->queue_id == 0xFFFF) 11972 status = -ENXIO; 11973 eq->host_index = 0; 11974 eq->hba_index = 0; 11975 11976 mempool_free(mbox, phba->mbox_mem_pool); 11977 return status; 11978 } 11979 11980 /** 11981 * lpfc_cq_create - Create a Completion Queue on the HBA 11982 * @phba: HBA structure that indicates port to create a queue on. 11983 * @cq: The queue structure to use to create the completion queue. 11984 * @eq: The event queue to bind this completion queue to. 11985 * 11986 * This function creates a completion queue, as detailed in @wq, on a port, 11987 * described by @phba by sending a CQ_CREATE mailbox command to the HBA. 11988 * 11989 * The @phba struct is used to send mailbox command to HBA. The @cq struct 11990 * is used to get the entry count and entry size that are necessary to 11991 * determine the number of pages to allocate and use for this queue. The @eq 11992 * is used to indicate which event queue to bind this completion queue to. This 11993 * function will send the CQ_CREATE mailbox command to the HBA to setup the 11994 * completion queue. This function is asynchronous and will wait for the mailbox 11995 * command to finish before continuing. 11996 * 11997 * On success this function will return a zero. If unable to allocate enough 11998 * memory this function will return -ENOMEM. If the queue create mailbox command 11999 * fails this function will return -ENXIO. 
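 *
 * Usage sketch (the entry size and count below are illustrative values, not
 * the ones chosen by the driver's configuration code): allocate host memory
 * for the queue, then create it on the port bound to an existing EQ:
 *
 *	struct lpfc_queue *cq;
 *
 *	cq = lpfc_sli4_queue_alloc(phba, 16, 256);
 *	if (!cq)
 *		return -ENOMEM;
 *	rc = lpfc_cq_create(phba, cq, eq, LPFC_WCQ, LPFC_FCP);
 *	if (rc)
 *		lpfc_sli4_queue_free(cq);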
12000 **/ 12001 uint32_t 12002 lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, 12003 struct lpfc_queue *eq, uint32_t type, uint32_t subtype) 12004 { 12005 struct lpfc_mbx_cq_create *cq_create; 12006 struct lpfc_dmabuf *dmabuf; 12007 LPFC_MBOXQ_t *mbox; 12008 int rc, length, status = 0; 12009 uint32_t shdr_status, shdr_add_status; 12010 union lpfc_sli4_cfg_shdr *shdr; 12011 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 12012 12013 /* sanity check on queue memory */ 12014 if (!cq || !eq) 12015 return -ENODEV; 12016 if (!phba->sli4_hba.pc_sli4_params.supported) 12017 hw_page_size = SLI4_PAGE_SIZE; 12018 12019 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12020 if (!mbox) 12021 return -ENOMEM; 12022 length = (sizeof(struct lpfc_mbx_cq_create) - 12023 sizeof(struct lpfc_sli4_cfg_mhdr)); 12024 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 12025 LPFC_MBOX_OPCODE_CQ_CREATE, 12026 length, LPFC_SLI4_MBX_EMBED); 12027 cq_create = &mbox->u.mqe.un.cq_create; 12028 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr; 12029 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request, 12030 cq->page_count); 12031 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1); 12032 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1); 12033 bf_set(lpfc_mbox_hdr_version, &shdr->request, 12034 phba->sli4_hba.pc_sli4_params.cqv); 12035 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) { 12036 /* FW only supports 1. Should be PAGE_SIZE/SLI4_PAGE_SIZE */ 12037 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, 1); 12038 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context, 12039 eq->queue_id); 12040 } else { 12041 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, 12042 eq->queue_id); 12043 } 12044 switch (cq->entry_count) { 12045 default: 12046 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12047 "0361 Unsupported CQ count. (%d)\n", 12048 cq->entry_count); 12049 if (cq->entry_count < 256) 12050 return -EINVAL; 12051 /* otherwise default to smallest count (drop through) */ 12052 case 256: 12053 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 12054 LPFC_CQ_CNT_256); 12055 break; 12056 case 512: 12057 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 12058 LPFC_CQ_CNT_512); 12059 break; 12060 case 1024: 12061 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 12062 LPFC_CQ_CNT_1024); 12063 break; 12064 } 12065 list_for_each_entry(dmabuf, &cq->page_list, list) { 12066 memset(dmabuf->virt, 0, hw_page_size); 12067 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 12068 putPaddrLow(dmabuf->phys); 12069 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 12070 putPaddrHigh(dmabuf->phys); 12071 } 12072 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 12073 12074 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 12075 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 12076 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 12077 if (shdr_status || shdr_add_status || rc) { 12078 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12079 "2501 CQ_CREATE mailbox failed with " 12080 "status x%x add_status x%x, mbx status x%x\n", 12081 shdr_status, shdr_add_status, rc); 12082 status = -ENXIO; 12083 goto out; 12084 } 12085 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response); 12086 if (cq->queue_id == 0xFFFF) { 12087 status = -ENXIO; 12088 goto out; 12089 } 12090 /* link the cq onto the parent eq child list */ 12091 list_add_tail(&cq->list, &eq->child_list); 12092 /* Set up completion queue's type and subtype */ 12093 cq->type = type; 12094 cq->subtype = subtype; 12095 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response); 12096 cq->assoc_qid = eq->queue_id; 12097 cq->host_index = 0; 12098 cq->hba_index = 0; 12099 12100 out: 12101 mempool_free(mbox, phba->mbox_mem_pool); 12102 return status; 12103 } 12104 12105 /** 12106 * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration 12107 * @phba: HBA structure that indicates port to create a queue on. 12108 * @mq: The queue structure to use to create the mailbox queue. 12109 * @mbox: An allocated pointer to type LPFC_MBOXQ_t 12110 * @cq: The completion queue to associate with this cq. 12111 * 12112 * This function provides failback (fb) functionality when the 12113 * mq_create_ext fails on older FW generations. It's purpose is identical 12114 * to mq_create_ext otherwise. 12115 * 12116 * This routine cannot fail as all attributes were previously accessed and 12117 * initialized in mq_create_ext. 12118 **/ 12119 static void 12120 lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq, 12121 LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq) 12122 { 12123 struct lpfc_mbx_mq_create *mq_create; 12124 struct lpfc_dmabuf *dmabuf; 12125 int length; 12126 12127 length = (sizeof(struct lpfc_mbx_mq_create) - 12128 sizeof(struct lpfc_sli4_cfg_mhdr)); 12129 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 12130 LPFC_MBOX_OPCODE_MQ_CREATE, 12131 length, LPFC_SLI4_MBX_EMBED); 12132 mq_create = &mbox->u.mqe.un.mq_create; 12133 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request, 12134 mq->page_count); 12135 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context, 12136 cq->queue_id); 12137 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1); 12138 switch (mq->entry_count) { 12139 case 16: 12140 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, 12141 LPFC_MQ_RING_SIZE_16); 12142 break; 12143 case 32: 12144 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, 12145 LPFC_MQ_RING_SIZE_32); 12146 break; 12147 case 64: 12148 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, 12149 LPFC_MQ_RING_SIZE_64); 12150 break; 12151 case 128: 12152 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, 12153 LPFC_MQ_RING_SIZE_128); 12154 break; 12155 } 12156 list_for_each_entry(dmabuf, &mq->page_list, list) { 12157 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 12158 putPaddrLow(dmabuf->phys); 12159 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 12160 putPaddrHigh(dmabuf->phys); 12161 } 12162 } 12163 12164 /** 12165 * lpfc_mq_create - Create a mailbox Queue on the HBA 12166 * @phba: HBA structure that indicates port to create a queue on. 12167 * @mq: The queue structure to use to create the mailbox queue. 
12168 * @cq: The completion queue to associate with this cq. 12169 * @subtype: The queue's subtype. 12170 * 12171 * This function creates a mailbox queue, as detailed in @mq, on a port, 12172 * described by @phba by sending a MQ_CREATE mailbox command to the HBA. 12173 * 12174 * The @phba struct is used to send mailbox command to HBA. The @cq struct 12175 * is used to get the entry count and entry size that are necessary to 12176 * determine the number of pages to allocate and use for this queue. This 12177 * function will send the MQ_CREATE mailbox command to the HBA to setup the 12178 * mailbox queue. This function is asynchronous and will wait for the mailbox 12179 * command to finish before continuing. 12180 * 12181 * On success this function will return a zero. If unable to allocate enough 12182 * memory this function will return -ENOMEM. If the queue create mailbox command 12183 * fails this function will return -ENXIO. 12184 **/ 12185 int32_t 12186 lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, 12187 struct lpfc_queue *cq, uint32_t subtype) 12188 { 12189 struct lpfc_mbx_mq_create *mq_create; 12190 struct lpfc_mbx_mq_create_ext *mq_create_ext; 12191 struct lpfc_dmabuf *dmabuf; 12192 LPFC_MBOXQ_t *mbox; 12193 int rc, length, status = 0; 12194 uint32_t shdr_status, shdr_add_status; 12195 union lpfc_sli4_cfg_shdr *shdr; 12196 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 12197 12198 /* sanity check on queue memory */ 12199 if (!mq || !cq) 12200 return -ENODEV; 12201 if (!phba->sli4_hba.pc_sli4_params.supported) 12202 hw_page_size = SLI4_PAGE_SIZE; 12203 12204 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12205 if (!mbox) 12206 return -ENOMEM; 12207 length = (sizeof(struct lpfc_mbx_mq_create_ext) - 12208 sizeof(struct lpfc_sli4_cfg_mhdr)); 12209 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 12210 LPFC_MBOX_OPCODE_MQ_CREATE_EXT, 12211 length, LPFC_SLI4_MBX_EMBED); 12212 12213 mq_create_ext = &mbox->u.mqe.un.mq_create_ext; 12214 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr; 12215 bf_set(lpfc_mbx_mq_create_ext_num_pages, 12216 &mq_create_ext->u.request, mq->page_count); 12217 bf_set(lpfc_mbx_mq_create_ext_async_evt_link, 12218 &mq_create_ext->u.request, 1); 12219 bf_set(lpfc_mbx_mq_create_ext_async_evt_fip, 12220 &mq_create_ext->u.request, 1); 12221 bf_set(lpfc_mbx_mq_create_ext_async_evt_group5, 12222 &mq_create_ext->u.request, 1); 12223 bf_set(lpfc_mbx_mq_create_ext_async_evt_fc, 12224 &mq_create_ext->u.request, 1); 12225 bf_set(lpfc_mbx_mq_create_ext_async_evt_sli, 12226 &mq_create_ext->u.request, 1); 12227 bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1); 12228 bf_set(lpfc_mbox_hdr_version, &shdr->request, 12229 phba->sli4_hba.pc_sli4_params.mqv); 12230 if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1) 12231 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request, 12232 cq->queue_id); 12233 else 12234 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context, 12235 cq->queue_id); 12236 switch (mq->entry_count) { 12237 default: 12238 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12239 "0362 Unsupported MQ count. 
(%d)\n", 12240 mq->entry_count); 12241 if (mq->entry_count < 16) 12242 return -EINVAL; 12243 /* otherwise default to smallest count (drop through) */ 12244 case 16: 12245 bf_set(lpfc_mq_context_ring_size, 12246 &mq_create_ext->u.request.context, 12247 LPFC_MQ_RING_SIZE_16); 12248 break; 12249 case 32: 12250 bf_set(lpfc_mq_context_ring_size, 12251 &mq_create_ext->u.request.context, 12252 LPFC_MQ_RING_SIZE_32); 12253 break; 12254 case 64: 12255 bf_set(lpfc_mq_context_ring_size, 12256 &mq_create_ext->u.request.context, 12257 LPFC_MQ_RING_SIZE_64); 12258 break; 12259 case 128: 12260 bf_set(lpfc_mq_context_ring_size, 12261 &mq_create_ext->u.request.context, 12262 LPFC_MQ_RING_SIZE_128); 12263 break; 12264 } 12265 list_for_each_entry(dmabuf, &mq->page_list, list) { 12266 memset(dmabuf->virt, 0, hw_page_size); 12267 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo = 12268 putPaddrLow(dmabuf->phys); 12269 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi = 12270 putPaddrHigh(dmabuf->phys); 12271 } 12272 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 12273 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, 12274 &mq_create_ext->u.response); 12275 if (rc != MBX_SUCCESS) { 12276 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12277 "2795 MQ_CREATE_EXT failed with " 12278 "status x%x. Failback to MQ_CREATE.\n", 12279 rc); 12280 lpfc_mq_create_fb_init(phba, mq, mbox, cq); 12281 mq_create = &mbox->u.mqe.un.mq_create; 12282 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 12283 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr; 12284 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, 12285 &mq_create->u.response); 12286 } 12287 12288 /* The IOCTL status is embedded in the mailbox subheader. */ 12289 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 12290 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 12291 if (shdr_status || shdr_add_status || rc) { 12292 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12293 "2502 MQ_CREATE mailbox failed with " 12294 "status x%x add_status x%x, mbx status x%x\n", 12295 shdr_status, shdr_add_status, rc); 12296 status = -ENXIO; 12297 goto out; 12298 } 12299 if (mq->queue_id == 0xFFFF) { 12300 status = -ENXIO; 12301 goto out; 12302 } 12303 mq->type = LPFC_MQ; 12304 mq->assoc_qid = cq->queue_id; 12305 mq->subtype = subtype; 12306 mq->host_index = 0; 12307 mq->hba_index = 0; 12308 12309 /* link the mq onto the parent cq child list */ 12310 list_add_tail(&mq->list, &cq->child_list); 12311 out: 12312 mempool_free(mbox, phba->mbox_mem_pool); 12313 return status; 12314 } 12315 12316 /** 12317 * lpfc_wq_create - Create a Work Queue on the HBA 12318 * @phba: HBA structure that indicates port to create a queue on. 12319 * @wq: The queue structure to use to create the work queue. 12320 * @cq: The completion queue to bind this work queue to. 12321 * @subtype: The subtype of the work queue indicating its functionality. 12322 * 12323 * This function creates a work queue, as detailed in @wq, on a port, described 12324 * by @phba by sending a WQ_CREATE mailbox command to the HBA. 12325 * 12326 * The @phba struct is used to send mailbox command to HBA. The @wq struct 12327 * is used to get the entry count and entry size that are necessary to 12328 * determine the number of pages to allocate and use for this queue. The @cq 12329 * is used to indicate which completion queue to bind this work queue to. This 12330 * function will send the WQ_CREATE mailbox command to the HBA to setup the 12331 * work queue. 
This function is asynchronous and will wait for the mailbox 12332 * command to finish before continuing. 12333 * 12334 * On success this function will return a zero. If unable to allocate enough 12335 * memory this function will return -ENOMEM. If the queue create mailbox command 12336 * fails this function will return -ENXIO. 12337 **/ 12338 uint32_t 12339 lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, 12340 struct lpfc_queue *cq, uint32_t subtype) 12341 { 12342 struct lpfc_mbx_wq_create *wq_create; 12343 struct lpfc_dmabuf *dmabuf; 12344 LPFC_MBOXQ_t *mbox; 12345 int rc, length, status = 0; 12346 uint32_t shdr_status, shdr_add_status; 12347 union lpfc_sli4_cfg_shdr *shdr; 12348 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 12349 struct dma_address *page; 12350 12351 /* sanity check on queue memory */ 12352 if (!wq || !cq) 12353 return -ENODEV; 12354 if (!phba->sli4_hba.pc_sli4_params.supported) 12355 hw_page_size = SLI4_PAGE_SIZE; 12356 12357 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12358 if (!mbox) 12359 return -ENOMEM; 12360 length = (sizeof(struct lpfc_mbx_wq_create) - 12361 sizeof(struct lpfc_sli4_cfg_mhdr)); 12362 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 12363 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE, 12364 length, LPFC_SLI4_MBX_EMBED); 12365 wq_create = &mbox->u.mqe.un.wq_create; 12366 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr; 12367 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request, 12368 wq->page_count); 12369 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request, 12370 cq->queue_id); 12371 bf_set(lpfc_mbox_hdr_version, &shdr->request, 12372 phba->sli4_hba.pc_sli4_params.wqv); 12373 if (phba->sli4_hba.pc_sli4_params.wqv == LPFC_Q_CREATE_VERSION_1) { 12374 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1, 12375 wq->entry_count); 12376 switch (wq->entry_size) { 12377 default: 12378 case 64: 12379 bf_set(lpfc_mbx_wq_create_wqe_size, 12380 &wq_create->u.request_1, 12381 LPFC_WQ_WQE_SIZE_64); 12382 break; 12383 case 128: 12384 bf_set(lpfc_mbx_wq_create_wqe_size, 12385 &wq_create->u.request_1, 12386 LPFC_WQ_WQE_SIZE_128); 12387 break; 12388 } 12389 bf_set(lpfc_mbx_wq_create_page_size, &wq_create->u.request_1, 12390 (PAGE_SIZE/SLI4_PAGE_SIZE)); 12391 page = wq_create->u.request_1.page; 12392 } else { 12393 page = wq_create->u.request.page; 12394 } 12395 list_for_each_entry(dmabuf, &wq->page_list, list) { 12396 memset(dmabuf->virt, 0, hw_page_size); 12397 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys); 12398 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys); 12399 } 12400 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 12401 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 12402 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 12403 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 12404 if (shdr_status || shdr_add_status || rc) { 12405 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12406 "2503 WQ_CREATE mailbox failed with " 12407 "status x%x add_status x%x, mbx status x%x\n", 12408 shdr_status, shdr_add_status, rc); 12409 status = -ENXIO; 12410 goto out; 12411 } 12412 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, &wq_create->u.response); 12413 if (wq->queue_id == 0xFFFF) { 12414 status = -ENXIO; 12415 goto out; 12416 } 12417 wq->type = LPFC_WQ; 12418 wq->assoc_qid = cq->queue_id; 12419 wq->subtype = subtype; 12420 wq->host_index = 0; 12421 wq->hba_index = 0; 12422 wq->entry_repost = LPFC_RELEASE_NOTIFICATION_INTERVAL; 12423 12424 /* link the wq onto the parent cq child list */ 12425 list_add_tail(&wq->list, &cq->child_list); 12426 out: 12427 mempool_free(mbox, phba->mbox_mem_pool); 12428 return status; 12429 } 12430 12431 /** 12432 * lpfc_rq_adjust_repost - Adjust entry_repost for an RQ 12433 * @phba: HBA structure that indicates port to create a queue on. 12434 * @rq: The queue structure to use for the receive queue. 12435 * @qno: The associated HBQ number 12436 * 12437 * 12438 * For SLI4 we need to adjust the RQ repost value based on 12439 * the number of buffers that are initially posted to the RQ. 12440 */ 12441 void 12442 lpfc_rq_adjust_repost(struct lpfc_hba *phba, struct lpfc_queue *rq, int qno) 12443 { 12444 uint32_t cnt; 12445 12446 /* sanity check on queue memory */ 12447 if (!rq) 12448 return; 12449 cnt = lpfc_hbq_defs[qno]->entry_count; 12450 12451 /* Recalc repost for RQs based on buffers initially posted */ 12452 cnt = (cnt >> 3); 12453 if (cnt < LPFC_QUEUE_MIN_REPOST) 12454 cnt = LPFC_QUEUE_MIN_REPOST; 12455 12456 rq->entry_repost = cnt; 12457 } 12458 12459 /** 12460 * lpfc_rq_create - Create a Receive Queue on the HBA 12461 * @phba: HBA structure that indicates port to create a queue on. 12462 * @hrq: The queue structure to use to create the header receive queue. 12463 * @drq: The queue structure to use to create the data receive queue. 12464 * @cq: The completion queue to bind this work queue to. 12465 * 12466 * This function creates a receive buffer queue pair , as detailed in @hrq and 12467 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command 12468 * to the HBA. 12469 * 12470 * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq 12471 * struct is used to get the entry count that is necessary to determine the 12472 * number of pages to use for this queue. The @cq is used to indicate which 12473 * completion queue to bind received buffers that are posted to these queues to. 12474 * This function will send the RQ_CREATE mailbox command to the HBA to setup the 12475 * receive queue pair. This function is asynchronous and will wait for the 12476 * mailbox command to finish before continuing. 12477 * 12478 * On success this function will return a zero. If unable to allocate enough 12479 * memory this function will return -ENOMEM. If the queue create mailbox command 12480 * fails this function will return -ENXIO. 
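 *
 * A hedged example of bringing up a receive queue pair (hrq, drq and cq
 * are assumed to be pre-allocated elsewhere; LPFC_USOL and LPFC_ELS_HBQ
 * are representative subtype/HBQ values, not requirements of this routine):
 *
 *	rc = lpfc_rq_create(phba, hrq, drq, cq, LPFC_USOL);
 *	if (rc)
 *		return rc;
 *	lpfc_rq_adjust_repost(phba, hrq, LPFC_ELS_HBQ);
 *	lpfc_rq_adjust_repost(phba, drq, LPFC_ELS_HBQ);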
12481 **/ 12482 uint32_t 12483 lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, 12484 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype) 12485 { 12486 struct lpfc_mbx_rq_create *rq_create; 12487 struct lpfc_dmabuf *dmabuf; 12488 LPFC_MBOXQ_t *mbox; 12489 int rc, length, status = 0; 12490 uint32_t shdr_status, shdr_add_status; 12491 union lpfc_sli4_cfg_shdr *shdr; 12492 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 12493 12494 /* sanity check on queue memory */ 12495 if (!hrq || !drq || !cq) 12496 return -ENODEV; 12497 if (!phba->sli4_hba.pc_sli4_params.supported) 12498 hw_page_size = SLI4_PAGE_SIZE; 12499 12500 if (hrq->entry_count != drq->entry_count) 12501 return -EINVAL; 12502 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12503 if (!mbox) 12504 return -ENOMEM; 12505 length = (sizeof(struct lpfc_mbx_rq_create) - 12506 sizeof(struct lpfc_sli4_cfg_mhdr)); 12507 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 12508 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, 12509 length, LPFC_SLI4_MBX_EMBED); 12510 rq_create = &mbox->u.mqe.un.rq_create; 12511 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; 12512 bf_set(lpfc_mbox_hdr_version, &shdr->request, 12513 phba->sli4_hba.pc_sli4_params.rqv); 12514 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { 12515 bf_set(lpfc_rq_context_rqe_count_1, 12516 &rq_create->u.request.context, 12517 hrq->entry_count); 12518 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE; 12519 bf_set(lpfc_rq_context_rqe_size, 12520 &rq_create->u.request.context, 12521 LPFC_RQE_SIZE_8); 12522 bf_set(lpfc_rq_context_page_size, 12523 &rq_create->u.request.context, 12524 (PAGE_SIZE/SLI4_PAGE_SIZE)); 12525 } else { 12526 switch (hrq->entry_count) { 12527 default: 12528 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12529 "2535 Unsupported RQ count. (%d)\n", 12530 hrq->entry_count); 12531 if (hrq->entry_count < 512) 12532 return -EINVAL; 12533 /* otherwise default to smallest count (drop through) */ 12534 case 512: 12535 bf_set(lpfc_rq_context_rqe_count, 12536 &rq_create->u.request.context, 12537 LPFC_RQ_RING_SIZE_512); 12538 break; 12539 case 1024: 12540 bf_set(lpfc_rq_context_rqe_count, 12541 &rq_create->u.request.context, 12542 LPFC_RQ_RING_SIZE_1024); 12543 break; 12544 case 2048: 12545 bf_set(lpfc_rq_context_rqe_count, 12546 &rq_create->u.request.context, 12547 LPFC_RQ_RING_SIZE_2048); 12548 break; 12549 case 4096: 12550 bf_set(lpfc_rq_context_rqe_count, 12551 &rq_create->u.request.context, 12552 LPFC_RQ_RING_SIZE_4096); 12553 break; 12554 } 12555 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, 12556 LPFC_HDR_BUF_SIZE); 12557 } 12558 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, 12559 cq->queue_id); 12560 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, 12561 hrq->page_count); 12562 list_for_each_entry(dmabuf, &hrq->page_list, list) { 12563 memset(dmabuf->virt, 0, hw_page_size); 12564 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 12565 putPaddrLow(dmabuf->phys); 12566 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 12567 putPaddrHigh(dmabuf->phys); 12568 } 12569 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 12570 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 12571 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 12572 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 12573 if (shdr_status || shdr_add_status || rc) { 12574 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12575 "2504 RQ_CREATE mailbox failed with " 12576 "status x%x add_status x%x, mbx status x%x\n", 12577 shdr_status, shdr_add_status, rc); 12578 status = -ENXIO; 12579 goto out; 12580 } 12581 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); 12582 if (hrq->queue_id == 0xFFFF) { 12583 status = -ENXIO; 12584 goto out; 12585 } 12586 hrq->type = LPFC_HRQ; 12587 hrq->assoc_qid = cq->queue_id; 12588 hrq->subtype = subtype; 12589 hrq->host_index = 0; 12590 hrq->hba_index = 0; 12591 12592 /* now create the data queue */ 12593 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 12594 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, 12595 length, LPFC_SLI4_MBX_EMBED); 12596 bf_set(lpfc_mbox_hdr_version, &shdr->request, 12597 phba->sli4_hba.pc_sli4_params.rqv); 12598 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { 12599 bf_set(lpfc_rq_context_rqe_count_1, 12600 &rq_create->u.request.context, hrq->entry_count); 12601 rq_create->u.request.context.buffer_size = LPFC_DATA_BUF_SIZE; 12602 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context, 12603 LPFC_RQE_SIZE_8); 12604 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context, 12605 (PAGE_SIZE/SLI4_PAGE_SIZE)); 12606 } else { 12607 switch (drq->entry_count) { 12608 default: 12609 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12610 "2536 Unsupported RQ count. (%d)\n", 12611 drq->entry_count); 12612 if (drq->entry_count < 512) 12613 return -EINVAL; 12614 /* otherwise default to smallest count (drop through) */ 12615 case 512: 12616 bf_set(lpfc_rq_context_rqe_count, 12617 &rq_create->u.request.context, 12618 LPFC_RQ_RING_SIZE_512); 12619 break; 12620 case 1024: 12621 bf_set(lpfc_rq_context_rqe_count, 12622 &rq_create->u.request.context, 12623 LPFC_RQ_RING_SIZE_1024); 12624 break; 12625 case 2048: 12626 bf_set(lpfc_rq_context_rqe_count, 12627 &rq_create->u.request.context, 12628 LPFC_RQ_RING_SIZE_2048); 12629 break; 12630 case 4096: 12631 bf_set(lpfc_rq_context_rqe_count, 12632 &rq_create->u.request.context, 12633 LPFC_RQ_RING_SIZE_4096); 12634 break; 12635 } 12636 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, 12637 LPFC_DATA_BUF_SIZE); 12638 } 12639 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, 12640 cq->queue_id); 12641 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, 12642 drq->page_count); 12643 list_for_each_entry(dmabuf, &drq->page_list, list) { 12644 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 12645 putPaddrLow(dmabuf->phys); 12646 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 12647 putPaddrHigh(dmabuf->phys); 12648 } 12649 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 12650 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 12651 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; 12652 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 12653 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 12654 if (shdr_status || shdr_add_status || rc) { 12655 status = -ENXIO; 12656 goto out; 12657 } 12658 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); 12659 if (drq->queue_id == 0xFFFF) { 12660 status = -ENXIO; 12661 goto out; 12662 } 12663 drq->type = LPFC_DRQ; 12664 drq->assoc_qid = cq->queue_id; 12665 drq->subtype = subtype; 12666 drq->host_index = 0; 12667 drq->hba_index = 0; 12668 12669 /* link the header and data RQs onto the parent cq child list */ 12670 list_add_tail(&hrq->list, &cq->child_list); 12671 list_add_tail(&drq->list, &cq->child_list); 12672 12673 out: 12674 mempool_free(mbox, phba->mbox_mem_pool); 12675 return status; 12676 } 12677 12678 /** 12679 * lpfc_eq_destroy - Destroy an event Queue on the HBA 12680 * @eq: The queue structure associated with the queue to destroy. 12681 * 12682 * This function destroys a queue, as detailed in @eq by sending an mailbox 12683 * command, specific to the type of queue, to the HBA. 12684 * 12685 * The @eq struct is used to get the queue ID of the queue to destroy. 12686 * 12687 * On success this function will return a zero. If the queue destroy mailbox 12688 * command fails this function will return -ENXIO. 12689 **/ 12690 uint32_t 12691 lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq) 12692 { 12693 LPFC_MBOXQ_t *mbox; 12694 int rc, length, status = 0; 12695 uint32_t shdr_status, shdr_add_status; 12696 union lpfc_sli4_cfg_shdr *shdr; 12697 12698 /* sanity check on queue memory */ 12699 if (!eq) 12700 return -ENODEV; 12701 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL); 12702 if (!mbox) 12703 return -ENOMEM; 12704 length = (sizeof(struct lpfc_mbx_eq_destroy) - 12705 sizeof(struct lpfc_sli4_cfg_mhdr)); 12706 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 12707 LPFC_MBOX_OPCODE_EQ_DESTROY, 12708 length, LPFC_SLI4_MBX_EMBED); 12709 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request, 12710 eq->queue_id); 12711 mbox->vport = eq->phba->pport; 12712 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 12713 12714 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL); 12715 /* The IOCTL status is embedded in the mailbox subheader. */ 12716 shdr = (union lpfc_sli4_cfg_shdr *) 12717 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr; 12718 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 12719 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 12720 if (shdr_status || shdr_add_status || rc) { 12721 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12722 "2505 EQ_DESTROY mailbox failed with " 12723 "status x%x add_status x%x, mbx status x%x\n", 12724 shdr_status, shdr_add_status, rc); 12725 status = -ENXIO; 12726 } 12727 12728 /* Remove eq from any list */ 12729 list_del_init(&eq->list); 12730 mempool_free(mbox, eq->phba->mbox_mem_pool); 12731 return status; 12732 } 12733 12734 /** 12735 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA 12736 * @cq: The queue structure associated with the queue to destroy. 12737 * 12738 * This function destroys a queue, as detailed in @cq by sending an mailbox 12739 * command, specific to the type of queue, to the HBA. 12740 * 12741 * The @cq struct is used to get the queue ID of the queue to destroy. 12742 * 12743 * On success this function will return a zero. 
If the queue destroy mailbox 12744 * command fails this function will return -ENXIO. 12745 **/ 12746 uint32_t 12747 lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq) 12748 { 12749 LPFC_MBOXQ_t *mbox; 12750 int rc, length, status = 0; 12751 uint32_t shdr_status, shdr_add_status; 12752 union lpfc_sli4_cfg_shdr *shdr; 12753 12754 /* sanity check on queue memory */ 12755 if (!cq) 12756 return -ENODEV; 12757 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL); 12758 if (!mbox) 12759 return -ENOMEM; 12760 length = (sizeof(struct lpfc_mbx_cq_destroy) - 12761 sizeof(struct lpfc_sli4_cfg_mhdr)); 12762 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 12763 LPFC_MBOX_OPCODE_CQ_DESTROY, 12764 length, LPFC_SLI4_MBX_EMBED); 12765 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request, 12766 cq->queue_id); 12767 mbox->vport = cq->phba->pport; 12768 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 12769 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL); 12770 /* The IOCTL status is embedded in the mailbox subheader. */ 12771 shdr = (union lpfc_sli4_cfg_shdr *) 12772 &mbox->u.mqe.un.wq_create.header.cfg_shdr; 12773 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 12774 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 12775 if (shdr_status || shdr_add_status || rc) { 12776 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12777 "2506 CQ_DESTROY mailbox failed with " 12778 "status x%x add_status x%x, mbx status x%x\n", 12779 shdr_status, shdr_add_status, rc); 12780 status = -ENXIO; 12781 } 12782 /* Remove cq from any list */ 12783 list_del_init(&cq->list); 12784 mempool_free(mbox, cq->phba->mbox_mem_pool); 12785 return status; 12786 } 12787 12788 /** 12789 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA 12790 * @qm: The queue structure associated with the queue to destroy. 12791 * 12792 * This function destroys a queue, as detailed in @mq by sending an mailbox 12793 * command, specific to the type of queue, to the HBA. 12794 * 12795 * The @mq struct is used to get the queue ID of the queue to destroy. 12796 * 12797 * On success this function will return a zero. If the queue destroy mailbox 12798 * command fails this function will return -ENXIO. 12799 **/ 12800 uint32_t 12801 lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq) 12802 { 12803 LPFC_MBOXQ_t *mbox; 12804 int rc, length, status = 0; 12805 uint32_t shdr_status, shdr_add_status; 12806 union lpfc_sli4_cfg_shdr *shdr; 12807 12808 /* sanity check on queue memory */ 12809 if (!mq) 12810 return -ENODEV; 12811 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL); 12812 if (!mbox) 12813 return -ENOMEM; 12814 length = (sizeof(struct lpfc_mbx_mq_destroy) - 12815 sizeof(struct lpfc_sli4_cfg_mhdr)); 12816 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 12817 LPFC_MBOX_OPCODE_MQ_DESTROY, 12818 length, LPFC_SLI4_MBX_EMBED); 12819 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request, 12820 mq->queue_id); 12821 mbox->vport = mq->phba->pport; 12822 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 12823 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL); 12824 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 12825 shdr = (union lpfc_sli4_cfg_shdr *) 12826 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr; 12827 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 12828 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 12829 if (shdr_status || shdr_add_status || rc) { 12830 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12831 "2507 MQ_DESTROY mailbox failed with " 12832 "status x%x add_status x%x, mbx status x%x\n", 12833 shdr_status, shdr_add_status, rc); 12834 status = -ENXIO; 12835 } 12836 /* Remove mq from any list */ 12837 list_del_init(&mq->list); 12838 mempool_free(mbox, mq->phba->mbox_mem_pool); 12839 return status; 12840 } 12841 12842 /** 12843 * lpfc_wq_destroy - Destroy a Work Queue on the HBA 12844 * @wq: The queue structure associated with the queue to destroy. 12845 * 12846 * This function destroys a queue, as detailed in @wq by sending an mailbox 12847 * command, specific to the type of queue, to the HBA. 12848 * 12849 * The @wq struct is used to get the queue ID of the queue to destroy. 12850 * 12851 * On success this function will return a zero. If the queue destroy mailbox 12852 * command fails this function will return -ENXIO. 12853 **/ 12854 uint32_t 12855 lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq) 12856 { 12857 LPFC_MBOXQ_t *mbox; 12858 int rc, length, status = 0; 12859 uint32_t shdr_status, shdr_add_status; 12860 union lpfc_sli4_cfg_shdr *shdr; 12861 12862 /* sanity check on queue memory */ 12863 if (!wq) 12864 return -ENODEV; 12865 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL); 12866 if (!mbox) 12867 return -ENOMEM; 12868 length = (sizeof(struct lpfc_mbx_wq_destroy) - 12869 sizeof(struct lpfc_sli4_cfg_mhdr)); 12870 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 12871 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY, 12872 length, LPFC_SLI4_MBX_EMBED); 12873 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request, 12874 wq->queue_id); 12875 mbox->vport = wq->phba->pport; 12876 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 12877 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL); 12878 shdr = (union lpfc_sli4_cfg_shdr *) 12879 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr; 12880 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 12881 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 12882 if (shdr_status || shdr_add_status || rc) { 12883 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12884 "2508 WQ_DESTROY mailbox failed with " 12885 "status x%x add_status x%x, mbx status x%x\n", 12886 shdr_status, shdr_add_status, rc); 12887 status = -ENXIO; 12888 } 12889 /* Remove wq from any list */ 12890 list_del_init(&wq->list); 12891 mempool_free(mbox, wq->phba->mbox_mem_pool); 12892 return status; 12893 } 12894 12895 /** 12896 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA 12897 * @rq: The queue structure associated with the queue to destroy. 12898 * 12899 * This function destroys a queue, as detailed in @rq by sending an mailbox 12900 * command, specific to the type of queue, to the HBA. 12901 * 12902 * The @rq struct is used to get the queue ID of the queue to destroy. 12903 * 12904 * On success this function will return a zero. If the queue destroy mailbox 12905 * command fails this function will return -ENXIO. 
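 *
 * Queue teardown normally mirrors creation in reverse; a sketch of the
 * expected ordering (queue pointers assumed valid, return values ignored
 * here only for brevity):
 *
 *	lpfc_rq_destroy(phba, hrq, drq);
 *	lpfc_wq_destroy(phba, wq);
 *	lpfc_mq_destroy(phba, mq);
 *	lpfc_cq_destroy(phba, cq);
 *	lpfc_eq_destroy(phba, eq);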
12906 **/ 12907 uint32_t 12908 lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq, 12909 struct lpfc_queue *drq) 12910 { 12911 LPFC_MBOXQ_t *mbox; 12912 int rc, length, status = 0; 12913 uint32_t shdr_status, shdr_add_status; 12914 union lpfc_sli4_cfg_shdr *shdr; 12915 12916 /* sanity check on queue memory */ 12917 if (!hrq || !drq) 12918 return -ENODEV; 12919 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL); 12920 if (!mbox) 12921 return -ENOMEM; 12922 length = (sizeof(struct lpfc_mbx_rq_destroy) - 12923 sizeof(struct lpfc_sli4_cfg_mhdr)); 12924 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 12925 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY, 12926 length, LPFC_SLI4_MBX_EMBED); 12927 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request, 12928 hrq->queue_id); 12929 mbox->vport = hrq->phba->pport; 12930 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 12931 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL); 12932 /* The IOCTL status is embedded in the mailbox subheader. */ 12933 shdr = (union lpfc_sli4_cfg_shdr *) 12934 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr; 12935 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 12936 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 12937 if (shdr_status || shdr_add_status || rc) { 12938 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12939 "2509 RQ_DESTROY mailbox failed with " 12940 "status x%x add_status x%x, mbx status x%x\n", 12941 shdr_status, shdr_add_status, rc); 12942 if (rc != MBX_TIMEOUT) 12943 mempool_free(mbox, hrq->phba->mbox_mem_pool); 12944 return -ENXIO; 12945 } 12946 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request, 12947 drq->queue_id); 12948 rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL); 12949 shdr = (union lpfc_sli4_cfg_shdr *) 12950 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr; 12951 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 12952 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 12953 if (shdr_status || shdr_add_status || rc) { 12954 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12955 "2510 RQ_DESTROY mailbox failed with " 12956 "status x%x add_status x%x, mbx status x%x\n", 12957 shdr_status, shdr_add_status, rc); 12958 status = -ENXIO; 12959 } 12960 list_del_init(&hrq->list); 12961 list_del_init(&drq->list); 12962 mempool_free(mbox, hrq->phba->mbox_mem_pool); 12963 return status; 12964 } 12965 12966 /** 12967 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA 12968 * @phba: The virtual port for which this call being executed. 12969 * @pdma_phys_addr0: Physical address of the 1st SGL page. 12970 * @pdma_phys_addr1: Physical address of the 2nd SGL page. 12971 * @xritag: the xritag that ties this io to the SGL pages. 12972 * 12973 * This routine will post the sgl pages for the IO that has the xritag 12974 * that is in the iocbq structure. The xritag is assigned during iocbq 12975 * creation and persists for as long as the driver is loaded. 12976 * if the caller has fewer than 256 scatter gather segments to map then 12977 * pdma_phys_addr1 should be 0. 12978 * If the caller needs to map more than 256 scatter gather segment then 12979 * pdma_phys_addr1 should be a valid physical address. 12980 * physical address for SGLs must be 64 byte aligned. 12981 * If you are going to map 2 SGL's then the first one must have 256 entries 12982 * the second sgl can have between 1 and 256 entries. 
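 *
 * A hedged single-page example (one SGL page, so the second address is
 * simply 0; sglq is assumed to be a previously initialized lpfc_sglq):
 *
 *	rc = lpfc_sli4_post_sgl(phba, sglq->phys, 0, sglq->sli4_xritag);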
12983 *
12984 * Return codes:
12985 * 0 - Success
12986 * -ENXIO, -ENOMEM - Failure
12987 **/
12988 int
12989 lpfc_sli4_post_sgl(struct lpfc_hba *phba,
12990 dma_addr_t pdma_phys_addr0,
12991 dma_addr_t pdma_phys_addr1,
12992 uint16_t xritag)
12993 {
12994 struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
12995 LPFC_MBOXQ_t *mbox;
12996 int rc;
12997 uint32_t shdr_status, shdr_add_status;
12998 uint32_t mbox_tmo;
12999 union lpfc_sli4_cfg_shdr *shdr;
13000
13001 if (xritag == NO_XRI) {
13002 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13003 "0364 Invalid param:\n");
13004 return -EINVAL;
13005 }
13006
13007 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13008 if (!mbox)
13009 return -ENOMEM;
13010
13011 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
13012 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
13013 sizeof(struct lpfc_mbx_post_sgl_pages) -
13014 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
13015
13016 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
13017 &mbox->u.mqe.un.post_sgl_pages;
13018 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
13019 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
13020
13021 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
13022 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
13023 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
13024 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
13025
13026 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
13027 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
13028 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
13029 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
13030 if (!phba->sli4_hba.intr_enable)
13031 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13032 else {
13033 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
13034 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
13035 }
13036 /* The IOCTL status is embedded in the mailbox subheader. */
13037 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
13038 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13039 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13040 if (rc != MBX_TIMEOUT)
13041 mempool_free(mbox, phba->mbox_mem_pool);
13042 if (shdr_status || shdr_add_status || rc) {
13043 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13044 "2511 POST_SGL mailbox failed with "
13045 "status x%x add_status x%x, mbx status x%x\n",
13046 shdr_status, shdr_add_status, rc);
13047 rc = -ENXIO;
13048 }
13049 return rc;
13050 }
13051
13052 /**
13053 * lpfc_sli4_alloc_xri - Get an available xri in the device's range
13054 * @phba: pointer to lpfc hba data structure.
13055 *
13056 * This routine is invoked to allocate the next available xri from the
13057 * driver's xri bitmask, consistent with the SLI-4 interface spec. The
13058 * bitmask and usage counters are updated under the hbalock before the
13059 * xri is returned to the caller.
13060 *
13061 * Returns
13062 * The allocated xri if one is available.
13063 * NO_XRI if every xri in the device's range is already in use.
13064 **/
13065 uint16_t
13066 lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
13067 {
13068 unsigned long xri;
13069
13070 /*
13071 * Fetch the next logical xri. Because this index is logical,
13072 * the driver starts at 0 each time.
13073 */
13074 spin_lock_irq(&phba->hbalock);
13075 xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
13076 phba->sli4_hba.max_cfg_param.max_xri, 0);
13077 if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
13078 spin_unlock_irq(&phba->hbalock);
13079 return NO_XRI;
13080 } else {
13081 set_bit(xri, phba->sli4_hba.xri_bmask);
13082 phba->sli4_hba.max_cfg_param.xri_used++;
13083 phba->sli4_hba.xri_count++;
13084 }
13085
13086 spin_unlock_irq(&phba->hbalock);
13087 return xri;
13088 }
13089
13090 /**
13091 * __lpfc_sli4_free_xri - Release an xri for reuse.
13092 * @phba: pointer to lpfc hba data structure.
13093 *
13094 * This routine is invoked to release an xri to the pool of available
13095 * xris maintained by the driver. The caller must hold the hbalock.
13096 **/
13097 void
13098 __lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
13099 {
13100 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
13101 phba->sli4_hba.xri_count--;
13102 phba->sli4_hba.max_cfg_param.xri_used--;
13103 }
13104 }
13105
13106 /**
13107 * lpfc_sli4_free_xri - Release an xri for reuse.
13108 * @phba: pointer to lpfc hba data structure.
13109 *
13110 * This routine is invoked to release an xri to the pool of
13111 * available xris maintained by the driver, taking the hbalock itself.
13112 **/
13113 void
13114 lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
13115 {
13116 spin_lock_irq(&phba->hbalock);
13117 __lpfc_sli4_free_xri(phba, xri);
13118 spin_unlock_irq(&phba->hbalock);
13119 }
13120
13121 /**
13122 * lpfc_sli4_next_xritag - Get an xritag for the io
13123 * @phba: Pointer to HBA context object.
13124 *
13125 * This function gets an xritag for the iocb. If there is no unused xritag
13126 * it will return NO_XRI (0xffff) and log the allocation failure.
13127 * Otherwise the function returns the newly allocated xritag.
13128 * NO_XRI is the only failure value; any other value is a usable xritag.
13129 * The caller is not required to hold any lock.
13130 **/
13131 uint16_t
13132 lpfc_sli4_next_xritag(struct lpfc_hba *phba)
13133 {
13134 uint16_t xri_index;
13135
13136 xri_index = lpfc_sli4_alloc_xri(phba);
13137 if (xri_index != NO_XRI)
13138 return xri_index;
13139
13140 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13141 "2004 Failed to allocate XRI. Last XRITAG is %d"
13142 " Max XRI is %d, Used XRI is %d\n",
13143 xri_index,
13144 phba->sli4_hba.max_cfg_param.max_xri,
13145 phba->sli4_hba.max_cfg_param.xri_used);
13146 return NO_XRI;
13147 }
13148
13149 /**
13150 * lpfc_sli4_post_els_sgl_list - post a block of ELS sgls to the port.
13151 * @phba: pointer to lpfc hba data structure.
13152 *
13153 * This routine is invoked to post a block of driver's sgl pages to the
13154 * HBA using a non-embedded mailbox command. No Lock is held. This routine
13155 * is only called when the driver is loading and after all IO has been
13156 * stopped.
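 *
 * A sketch of the expected call site (assumed to live in the HBA bring-up
 * path once the ELS sgl array has been built; the unwind label is
 * hypothetical):
 *
 *	rc = lpfc_sli4_post_els_sgl_list(phba);
 *	if (rc)
 *		goto out_free_sgl_list;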
13157 **/ 13158 int 13159 lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba) 13160 { 13161 struct lpfc_sglq *sglq_entry; 13162 struct lpfc_mbx_post_uembed_sgl_page1 *sgl; 13163 struct sgl_page_pairs *sgl_pg_pairs; 13164 void *viraddr; 13165 LPFC_MBOXQ_t *mbox; 13166 uint32_t reqlen, alloclen, pg_pairs; 13167 uint32_t mbox_tmo; 13168 uint16_t xritag_start = 0, lxri = 0; 13169 int els_xri_cnt, rc = 0; 13170 uint32_t shdr_status, shdr_add_status; 13171 union lpfc_sli4_cfg_shdr *shdr; 13172 13173 /* The number of sgls to be posted */ 13174 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 13175 13176 reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) + 13177 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); 13178 if (reqlen > SLI4_PAGE_SIZE) { 13179 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 13180 "2559 Block sgl registration required DMA " 13181 "size (%d) great than a page\n", reqlen); 13182 return -ENOMEM; 13183 } 13184 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 13185 if (!mbox) 13186 return -ENOMEM; 13187 13188 /* Allocate DMA memory and set up the non-embedded mailbox command */ 13189 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 13190 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen, 13191 LPFC_SLI4_MBX_NEMBED); 13192 13193 if (alloclen < reqlen) { 13194 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13195 "0285 Allocated DMA memory size (%d) is " 13196 "less than the requested DMA memory " 13197 "size (%d)\n", alloclen, reqlen); 13198 lpfc_sli4_mbox_cmd_free(phba, mbox); 13199 return -ENOMEM; 13200 } 13201 /* Set up the SGL pages in the non-embedded DMA pages */ 13202 viraddr = mbox->sge_array->addr[0]; 13203 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; 13204 sgl_pg_pairs = &sgl->sgl_pg_pairs; 13205 13206 for (pg_pairs = 0; pg_pairs < els_xri_cnt; pg_pairs++) { 13207 sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[pg_pairs]; 13208 13209 /* 13210 * Assign the sglq a physical xri only if the driver has not 13211 * initialized those resources. A port reset only needs 13212 * the sglq's posted. 13213 */ 13214 if (bf_get(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags) != 13215 LPFC_XRI_RSRC_RDY) { 13216 lxri = lpfc_sli4_next_xritag(phba); 13217 if (lxri == NO_XRI) { 13218 lpfc_sli4_mbox_cmd_free(phba, mbox); 13219 return -ENOMEM; 13220 } 13221 sglq_entry->sli4_lxritag = lxri; 13222 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 13223 } 13224 13225 /* Set up the sge entry */ 13226 sgl_pg_pairs->sgl_pg0_addr_lo = 13227 cpu_to_le32(putPaddrLow(sglq_entry->phys)); 13228 sgl_pg_pairs->sgl_pg0_addr_hi = 13229 cpu_to_le32(putPaddrHigh(sglq_entry->phys)); 13230 sgl_pg_pairs->sgl_pg1_addr_lo = 13231 cpu_to_le32(putPaddrLow(0)); 13232 sgl_pg_pairs->sgl_pg1_addr_hi = 13233 cpu_to_le32(putPaddrHigh(0)); 13234 13235 /* Keep the first xritag on the list */ 13236 if (pg_pairs == 0) 13237 xritag_start = sglq_entry->sli4_xritag; 13238 sgl_pg_pairs++; 13239 } 13240 13241 /* Complete initialization and perform endian conversion. 
*/ 13242 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); 13243 bf_set(lpfc_post_sgl_pages_xricnt, sgl, els_xri_cnt); 13244 sgl->word0 = cpu_to_le32(sgl->word0); 13245 if (!phba->sli4_hba.intr_enable) 13246 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 13247 else { 13248 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 13249 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 13250 } 13251 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; 13252 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 13253 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 13254 if (rc != MBX_TIMEOUT) 13255 lpfc_sli4_mbox_cmd_free(phba, mbox); 13256 if (shdr_status || shdr_add_status || rc) { 13257 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13258 "2513 POST_SGL_BLOCK mailbox command failed " 13259 "status x%x add_status x%x mbx status x%x\n", 13260 shdr_status, shdr_add_status, rc); 13261 rc = -ENXIO; 13262 } 13263 13264 if (rc == 0) 13265 bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags, 13266 LPFC_XRI_RSRC_RDY); 13267 return rc; 13268 } 13269 13270 /** 13271 * lpfc_sli4_post_els_sgl_list_ext - post a block of ELS sgls to the port. 13272 * @phba: pointer to lpfc hba data structure. 13273 * 13274 * This routine is invoked to post a block of driver's sgl pages to the 13275 * HBA using non-embedded mailbox command. No Lock is held. This routine 13276 * is only called when the driver is loading and after all IO has been 13277 * stopped. 13278 **/ 13279 int 13280 lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba) 13281 { 13282 struct lpfc_sglq *sglq_entry; 13283 struct lpfc_mbx_post_uembed_sgl_page1 *sgl; 13284 struct sgl_page_pairs *sgl_pg_pairs; 13285 void *viraddr; 13286 LPFC_MBOXQ_t *mbox; 13287 uint32_t reqlen, alloclen, index; 13288 uint32_t mbox_tmo; 13289 uint16_t rsrc_start, rsrc_size, els_xri_cnt, post_els_xri_cnt; 13290 uint16_t xritag_start = 0, lxri = 0; 13291 struct lpfc_rsrc_blks *rsrc_blk; 13292 int cnt, ttl_cnt, rc = 0; 13293 int loop_cnt; 13294 uint32_t shdr_status, shdr_add_status; 13295 union lpfc_sli4_cfg_shdr *shdr; 13296 13297 /* The number of sgls to be posted */ 13298 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 13299 13300 reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) + 13301 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); 13302 if (reqlen > SLI4_PAGE_SIZE) { 13303 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 13304 "2989 Block sgl registration required DMA " 13305 "size (%d) great than a page\n", reqlen); 13306 return -ENOMEM; 13307 } 13308 13309 cnt = 0; 13310 ttl_cnt = 0; 13311 post_els_xri_cnt = els_xri_cnt; 13312 list_for_each_entry(rsrc_blk, &phba->sli4_hba.lpfc_xri_blk_list, 13313 list) { 13314 rsrc_start = rsrc_blk->rsrc_start; 13315 rsrc_size = rsrc_blk->rsrc_size; 13316 13317 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 13318 "3014 Working ELS Extent start %d, cnt %d\n", 13319 rsrc_start, rsrc_size); 13320 13321 loop_cnt = min(post_els_xri_cnt, rsrc_size); 13322 if (loop_cnt < post_els_xri_cnt) { 13323 post_els_xri_cnt -= loop_cnt; 13324 ttl_cnt += loop_cnt; 13325 } else 13326 ttl_cnt += post_els_xri_cnt; 13327 13328 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 13329 if (!mbox) 13330 return -ENOMEM; 13331 /* 13332 * Allocate DMA memory and set up the non-embedded mailbox 13333 * command. 
13334 */ 13335 alloclen = lpfc_sli4_config(phba, mbox, 13336 LPFC_MBOX_SUBSYSTEM_FCOE, 13337 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, 13338 reqlen, LPFC_SLI4_MBX_NEMBED); 13339 if (alloclen < reqlen) { 13340 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13341 "2987 Allocated DMA memory size (%d) " 13342 "is less than the requested DMA memory " 13343 "size (%d)\n", alloclen, reqlen); 13344 lpfc_sli4_mbox_cmd_free(phba, mbox); 13345 return -ENOMEM; 13346 } 13347 13348 /* Set up the SGL pages in the non-embedded DMA pages */ 13349 viraddr = mbox->sge_array->addr[0]; 13350 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; 13351 sgl_pg_pairs = &sgl->sgl_pg_pairs; 13352 13353 /* 13354 * The starting resource may not begin at zero. Control 13355 * the loop variants via the block resource parameters, 13356 * but handle the sge pointers with a zero-based index 13357 * that doesn't get reset per loop pass. 13358 */ 13359 for (index = rsrc_start; 13360 index < rsrc_start + loop_cnt; 13361 index++) { 13362 sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[cnt]; 13363 13364 /* 13365 * Assign the sglq a physical xri only if the driver 13366 * has not initialized those resources. A port reset 13367 * only needs the sglq's posted. 13368 */ 13369 if (bf_get(lpfc_xri_rsrc_rdy, 13370 &phba->sli4_hba.sli4_flags) != 13371 LPFC_XRI_RSRC_RDY) { 13372 lxri = lpfc_sli4_next_xritag(phba); 13373 if (lxri == NO_XRI) { 13374 lpfc_sli4_mbox_cmd_free(phba, mbox); 13375 rc = -ENOMEM; 13376 goto err_exit; 13377 } 13378 sglq_entry->sli4_lxritag = lxri; 13379 sglq_entry->sli4_xritag = 13380 phba->sli4_hba.xri_ids[lxri]; 13381 } 13382 13383 /* Set up the sge entry */ 13384 sgl_pg_pairs->sgl_pg0_addr_lo = 13385 cpu_to_le32(putPaddrLow(sglq_entry->phys)); 13386 sgl_pg_pairs->sgl_pg0_addr_hi = 13387 cpu_to_le32(putPaddrHigh(sglq_entry->phys)); 13388 sgl_pg_pairs->sgl_pg1_addr_lo = 13389 cpu_to_le32(putPaddrLow(0)); 13390 sgl_pg_pairs->sgl_pg1_addr_hi = 13391 cpu_to_le32(putPaddrHigh(0)); 13392 13393 /* Track the starting physical XRI for the mailbox. */ 13394 if (index == rsrc_start) 13395 xritag_start = sglq_entry->sli4_xritag; 13396 sgl_pg_pairs++; 13397 cnt++; 13398 } 13399 13400 /* Complete initialization and perform endian conversion. 
*/ 13401 rsrc_blk->rsrc_used += loop_cnt; 13402 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); 13403 bf_set(lpfc_post_sgl_pages_xricnt, sgl, loop_cnt); 13404 sgl->word0 = cpu_to_le32(sgl->word0); 13405 13406 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 13407 "3015 Post ELS Extent SGL, start %d, " 13408 "cnt %d, used %d\n", 13409 xritag_start, loop_cnt, rsrc_blk->rsrc_used); 13410 if (!phba->sli4_hba.intr_enable) 13411 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 13412 else { 13413 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 13414 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 13415 } 13416 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; 13417 shdr_status = bf_get(lpfc_mbox_hdr_status, 13418 &shdr->response); 13419 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 13420 &shdr->response); 13421 if (rc != MBX_TIMEOUT) 13422 lpfc_sli4_mbox_cmd_free(phba, mbox); 13423 if (shdr_status || shdr_add_status || rc) { 13424 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13425 "2988 POST_SGL_BLOCK mailbox " 13426 "command failed status x%x " 13427 "add_status x%x mbx status x%x\n", 13428 shdr_status, shdr_add_status, rc); 13429 rc = -ENXIO; 13430 goto err_exit; 13431 } 13432 if (ttl_cnt >= els_xri_cnt) 13433 break; 13434 } 13435 13436 err_exit: 13437 if (rc == 0) 13438 bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags, 13439 LPFC_XRI_RSRC_RDY); 13440 return rc; 13441 } 13442 13443 /** 13444 * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware 13445 * @phba: pointer to lpfc hba data structure. 13446 * @sblist: pointer to scsi buffer list. 13447 * @count: number of scsi buffers on the list. 13448 * 13449 * This routine is invoked to post a block of @count scsi sgl pages from a 13450 * SCSI buffer list @sblist to the HBA using non-embedded mailbox command. 13451 * No Lock is held. 
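 *
 * A hedged example of posting a freshly built batch (the list head and
 * the posted count are assumed to come from the SCSI buffer allocation
 * loop; this is a sketch, not the driver's exact call site):
 *
 *	LIST_HEAD(post_sblist);
 *	(add newly allocated lpfc_scsi_buf entries to post_sblist)
 *	rc = lpfc_sli4_post_scsi_sgl_block(phba, &post_sblist, num_posted);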
13452 * 13453 **/ 13454 int 13455 lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist, 13456 int cnt) 13457 { 13458 struct lpfc_scsi_buf *psb; 13459 struct lpfc_mbx_post_uembed_sgl_page1 *sgl; 13460 struct sgl_page_pairs *sgl_pg_pairs; 13461 void *viraddr; 13462 LPFC_MBOXQ_t *mbox; 13463 uint32_t reqlen, alloclen, pg_pairs; 13464 uint32_t mbox_tmo; 13465 uint16_t xritag_start = 0; 13466 int rc = 0; 13467 uint32_t shdr_status, shdr_add_status; 13468 dma_addr_t pdma_phys_bpl1; 13469 union lpfc_sli4_cfg_shdr *shdr; 13470 13471 /* Calculate the requested length of the dma memory */ 13472 reqlen = cnt * sizeof(struct sgl_page_pairs) + 13473 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); 13474 if (reqlen > SLI4_PAGE_SIZE) { 13475 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 13476 "0217 Block sgl registration required DMA " 13477 "size (%d) great than a page\n", reqlen); 13478 return -ENOMEM; 13479 } 13480 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 13481 if (!mbox) { 13482 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13483 "0283 Failed to allocate mbox cmd memory\n"); 13484 return -ENOMEM; 13485 } 13486 13487 /* Allocate DMA memory and set up the non-embedded mailbox command */ 13488 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 13489 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen, 13490 LPFC_SLI4_MBX_NEMBED); 13491 13492 if (alloclen < reqlen) { 13493 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13494 "2561 Allocated DMA memory size (%d) is " 13495 "less than the requested DMA memory " 13496 "size (%d)\n", alloclen, reqlen); 13497 lpfc_sli4_mbox_cmd_free(phba, mbox); 13498 return -ENOMEM; 13499 } 13500 13501 /* Get the first SGE entry from the non-embedded DMA memory */ 13502 viraddr = mbox->sge_array->addr[0]; 13503 13504 /* Set up the SGL pages in the non-embedded DMA pages */ 13505 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; 13506 sgl_pg_pairs = &sgl->sgl_pg_pairs; 13507 13508 pg_pairs = 0; 13509 list_for_each_entry(psb, sblist, list) { 13510 /* Set up the sge entry */ 13511 sgl_pg_pairs->sgl_pg0_addr_lo = 13512 cpu_to_le32(putPaddrLow(psb->dma_phys_bpl)); 13513 sgl_pg_pairs->sgl_pg0_addr_hi = 13514 cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl)); 13515 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) 13516 pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE; 13517 else 13518 pdma_phys_bpl1 = 0; 13519 sgl_pg_pairs->sgl_pg1_addr_lo = 13520 cpu_to_le32(putPaddrLow(pdma_phys_bpl1)); 13521 sgl_pg_pairs->sgl_pg1_addr_hi = 13522 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1)); 13523 /* Keep the first xritag on the list */ 13524 if (pg_pairs == 0) 13525 xritag_start = psb->cur_iocbq.sli4_xritag; 13526 sgl_pg_pairs++; 13527 pg_pairs++; 13528 } 13529 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); 13530 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs); 13531 /* Perform endian conversion if necessary */ 13532 sgl->word0 = cpu_to_le32(sgl->word0); 13533 13534 if (!phba->sli4_hba.intr_enable) 13535 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 13536 else { 13537 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 13538 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 13539 } 13540 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; 13541 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 13542 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 13543 if (rc != MBX_TIMEOUT) 13544 lpfc_sli4_mbox_cmd_free(phba, mbox); 13545 if (shdr_status || shdr_add_status || rc) { 13546 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13547 
"2564 POST_SGL_BLOCK mailbox command failed " 13548 "status x%x add_status x%x mbx status x%x\n", 13549 shdr_status, shdr_add_status, rc); 13550 rc = -ENXIO; 13551 } 13552 return rc; 13553 } 13554 13555 /** 13556 * lpfc_sli4_post_scsi_sgl_blk_ext - post a block of scsi sgls to the port. 13557 * @phba: pointer to lpfc hba data structure. 13558 * @sblist: pointer to scsi buffer list. 13559 * @count: number of scsi buffers on the list. 13560 * 13561 * This routine is invoked to post a block of @count scsi sgl pages from a 13562 * SCSI buffer list @sblist to the HBA using non-embedded mailbox command. 13563 * No Lock is held. 13564 * 13565 **/ 13566 int 13567 lpfc_sli4_post_scsi_sgl_blk_ext(struct lpfc_hba *phba, struct list_head *sblist, 13568 int cnt) 13569 { 13570 struct lpfc_scsi_buf *psb = NULL; 13571 struct lpfc_mbx_post_uembed_sgl_page1 *sgl; 13572 struct sgl_page_pairs *sgl_pg_pairs; 13573 void *viraddr; 13574 LPFC_MBOXQ_t *mbox; 13575 uint32_t reqlen, alloclen, pg_pairs; 13576 uint32_t mbox_tmo; 13577 uint16_t xri_start = 0, scsi_xri_start; 13578 uint16_t rsrc_range; 13579 int rc = 0, avail_cnt; 13580 uint32_t shdr_status, shdr_add_status; 13581 dma_addr_t pdma_phys_bpl1; 13582 union lpfc_sli4_cfg_shdr *shdr; 13583 struct lpfc_rsrc_blks *rsrc_blk; 13584 uint32_t xri_cnt = 0; 13585 13586 /* Calculate the total requested length of the dma memory */ 13587 reqlen = cnt * sizeof(struct sgl_page_pairs) + 13588 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); 13589 if (reqlen > SLI4_PAGE_SIZE) { 13590 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 13591 "2932 Block sgl registration required DMA " 13592 "size (%d) great than a page\n", reqlen); 13593 return -ENOMEM; 13594 } 13595 13596 /* 13597 * The use of extents requires the driver to post the sgl headers 13598 * in multiple postings to meet the contiguous resource assignment. 13599 */ 13600 psb = list_prepare_entry(psb, sblist, list); 13601 scsi_xri_start = phba->sli4_hba.scsi_xri_start; 13602 list_for_each_entry(rsrc_blk, &phba->sli4_hba.lpfc_xri_blk_list, 13603 list) { 13604 rsrc_range = rsrc_blk->rsrc_start + rsrc_blk->rsrc_size; 13605 if (rsrc_range < scsi_xri_start) 13606 continue; 13607 else if (rsrc_blk->rsrc_used >= rsrc_blk->rsrc_size) 13608 continue; 13609 else 13610 avail_cnt = rsrc_blk->rsrc_size - rsrc_blk->rsrc_used; 13611 13612 reqlen = (avail_cnt * sizeof(struct sgl_page_pairs)) + 13613 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); 13614 /* 13615 * Allocate DMA memory and set up the non-embedded mailbox 13616 * command. The mbox is used to post an SGL page per loop 13617 * but the DMA memory has a use-once semantic so the mailbox 13618 * is used and freed per loop pass. 
13619 */ 13620 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 13621 if (!mbox) { 13622 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13623 "2933 Failed to allocate mbox cmd " 13624 "memory\n"); 13625 return -ENOMEM; 13626 } 13627 alloclen = lpfc_sli4_config(phba, mbox, 13628 LPFC_MBOX_SUBSYSTEM_FCOE, 13629 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, 13630 reqlen, 13631 LPFC_SLI4_MBX_NEMBED); 13632 if (alloclen < reqlen) { 13633 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13634 "2934 Allocated DMA memory size (%d) " 13635 "is less than the requested DMA memory " 13636 "size (%d)\n", alloclen, reqlen); 13637 lpfc_sli4_mbox_cmd_free(phba, mbox); 13638 return -ENOMEM; 13639 } 13640 13641 /* Get the first SGE entry from the non-embedded DMA memory */ 13642 viraddr = mbox->sge_array->addr[0]; 13643 13644 /* Set up the SGL pages in the non-embedded DMA pages */ 13645 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; 13646 sgl_pg_pairs = &sgl->sgl_pg_pairs; 13647 13648 /* pg_pairs tracks posted SGEs per loop iteration. */ 13649 pg_pairs = 0; 13650 list_for_each_entry_continue(psb, sblist, list) { 13651 /* Set up the sge entry */ 13652 sgl_pg_pairs->sgl_pg0_addr_lo = 13653 cpu_to_le32(putPaddrLow(psb->dma_phys_bpl)); 13654 sgl_pg_pairs->sgl_pg0_addr_hi = 13655 cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl)); 13656 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) 13657 pdma_phys_bpl1 = psb->dma_phys_bpl + 13658 SGL_PAGE_SIZE; 13659 else 13660 pdma_phys_bpl1 = 0; 13661 sgl_pg_pairs->sgl_pg1_addr_lo = 13662 cpu_to_le32(putPaddrLow(pdma_phys_bpl1)); 13663 sgl_pg_pairs->sgl_pg1_addr_hi = 13664 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1)); 13665 /* Keep the first xri for this extent. */ 13666 if (pg_pairs == 0) 13667 xri_start = psb->cur_iocbq.sli4_xritag; 13668 sgl_pg_pairs++; 13669 pg_pairs++; 13670 xri_cnt++; 13671 13672 /* 13673 * Track two exit conditions - the loop has constructed 13674 * all of the caller's SGE pairs or all available 13675 * resource IDs in this extent are consumed. 13676 */ 13677 if ((xri_cnt == cnt) || (pg_pairs >= avail_cnt)) 13678 break; 13679 } 13680 rsrc_blk->rsrc_used += pg_pairs; 13681 bf_set(lpfc_post_sgl_pages_xri, sgl, xri_start); 13682 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs); 13683 13684 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 13685 "3016 Post SCSI Extent SGL, start %d, cnt %d " 13686 "blk use %d\n", 13687 xri_start, pg_pairs, rsrc_blk->rsrc_used); 13688 /* Perform endian conversion if necessary */ 13689 sgl->word0 = cpu_to_le32(sgl->word0); 13690 if (!phba->sli4_hba.intr_enable) 13691 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 13692 else { 13693 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 13694 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 13695 } 13696 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; 13697 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 13698 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 13699 &shdr->response); 13700 if (rc != MBX_TIMEOUT) 13701 lpfc_sli4_mbox_cmd_free(phba, mbox); 13702 if (shdr_status || shdr_add_status || rc) { 13703 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13704 "2935 POST_SGL_BLOCK mailbox command " 13705 "failed status x%x add_status x%x " 13706 "mbx status x%x\n", 13707 shdr_status, shdr_add_status, rc); 13708 return -ENXIO; 13709 } 13710 13711 /* Post only what is requested. 
*/ 13712 if (xri_cnt >= cnt) 13713 break; 13714 } 13715 return rc; 13716 } 13717 13718 /** 13719 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle 13720 * @phba: pointer to lpfc_hba struct that the frame was received on 13721 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 13722 * 13723 * This function checks the fields in the @fc_hdr to see if the FC frame is a 13724 * valid type of frame that the LPFC driver will handle. This function will 13725 * return a zero if the frame is a valid frame or a non zero value when the 13726 * frame does not pass the check. 13727 **/ 13728 static int 13729 lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) 13730 { 13731 /* make rctl_names static to save stack space */ 13732 static char *rctl_names[] = FC_RCTL_NAMES_INIT; 13733 char *type_names[] = FC_TYPE_NAMES_INIT; 13734 struct fc_vft_header *fc_vft_hdr; 13735 uint32_t *header = (uint32_t *) fc_hdr; 13736 13737 switch (fc_hdr->fh_r_ctl) { 13738 case FC_RCTL_DD_UNCAT: /* uncategorized information */ 13739 case FC_RCTL_DD_SOL_DATA: /* solicited data */ 13740 case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */ 13741 case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */ 13742 case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */ 13743 case FC_RCTL_DD_DATA_DESC: /* data descriptor */ 13744 case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */ 13745 case FC_RCTL_DD_CMD_STATUS: /* command status */ 13746 case FC_RCTL_ELS_REQ: /* extended link services request */ 13747 case FC_RCTL_ELS_REP: /* extended link services reply */ 13748 case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */ 13749 case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */ 13750 case FC_RCTL_BA_NOP: /* basic link service NOP */ 13751 case FC_RCTL_BA_ABTS: /* basic link service abort */ 13752 case FC_RCTL_BA_RMC: /* remove connection */ 13753 case FC_RCTL_BA_ACC: /* basic accept */ 13754 case FC_RCTL_BA_RJT: /* basic reject */ 13755 case FC_RCTL_BA_PRMT: 13756 case FC_RCTL_ACK_1: /* acknowledge_1 */ 13757 case FC_RCTL_ACK_0: /* acknowledge_0 */ 13758 case FC_RCTL_P_RJT: /* port reject */ 13759 case FC_RCTL_F_RJT: /* fabric reject */ 13760 case FC_RCTL_P_BSY: /* port busy */ 13761 case FC_RCTL_F_BSY: /* fabric busy to data frame */ 13762 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */ 13763 case FC_RCTL_LCR: /* link credit reset */ 13764 case FC_RCTL_END: /* end */ 13765 break; 13766 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */ 13767 fc_vft_hdr = (struct fc_vft_header *)fc_hdr; 13768 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1]; 13769 return lpfc_fc_frame_check(phba, fc_hdr); 13770 default: 13771 goto drop; 13772 } 13773 switch (fc_hdr->fh_type) { 13774 case FC_TYPE_BLS: 13775 case FC_TYPE_ELS: 13776 case FC_TYPE_FCP: 13777 case FC_TYPE_CT: 13778 break; 13779 case FC_TYPE_IP: 13780 case FC_TYPE_ILS: 13781 default: 13782 goto drop; 13783 } 13784 13785 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 13786 "2538 Received frame rctl:%s type:%s " 13787 "Frame Data:%08x %08x %08x %08x %08x %08x\n", 13788 rctl_names[fc_hdr->fh_r_ctl], 13789 type_names[fc_hdr->fh_type], 13790 be32_to_cpu(header[0]), be32_to_cpu(header[1]), 13791 be32_to_cpu(header[2]), be32_to_cpu(header[3]), 13792 be32_to_cpu(header[4]), be32_to_cpu(header[5])); 13793 return 0; 13794 drop: 13795 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, 13796 "2539 Dropped frame rctl:%s type:%s\n", 13797 rctl_names[fc_hdr->fh_r_ctl], 13798 type_names[fc_hdr->fh_type]); 13799 return 1; 13800 } 13801 13802 /** 13803 * 
lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame 13804 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 13805 * 13806 * This function processes the FC header to retrieve the VFI from the VF 13807 * header, if one exists. This function will return the VFI if one exists 13808 * or 0 if no VSAN Header exists. 13809 **/ 13810 static uint32_t 13811 lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr) 13812 { 13813 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr; 13814 13815 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH) 13816 return 0; 13817 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr); 13818 } 13819 13820 /** 13821 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to 13822 * @phba: Pointer to the HBA structure to search for the vport on 13823 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 13824 * @fcfi: The FC Fabric ID that the frame came from 13825 * 13826 * This function searches the @phba for a vport that matches the content of the 13827 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the 13828 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function 13829 * returns the matching vport pointer or NULL if unable to match frame to a 13830 * vport. 13831 **/ 13832 static struct lpfc_vport * 13833 lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr, 13834 uint16_t fcfi) 13835 { 13836 struct lpfc_vport **vports; 13837 struct lpfc_vport *vport = NULL; 13838 int i; 13839 uint32_t did = (fc_hdr->fh_d_id[0] << 16 | 13840 fc_hdr->fh_d_id[1] << 8 | 13841 fc_hdr->fh_d_id[2]); 13842 if (did == Fabric_DID) 13843 return phba->pport; 13844 vports = lpfc_create_vport_work_array(phba); 13845 if (vports != NULL) 13846 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 13847 if (phba->fcf.fcfi == fcfi && 13848 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) && 13849 vports[i]->fc_myDID == did) { 13850 vport = vports[i]; 13851 break; 13852 } 13853 } 13854 lpfc_destroy_vport_work_array(phba, vports); 13855 return vport; 13856 } 13857 13858 /** 13859 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp 13860 * @vport: The vport to work on. 13861 * 13862 * This function updates the receive sequence time stamp for this vport. The 13863 * receive sequence time stamp indicates the time that the last frame of the 13864 * the sequence that has been idle for the longest amount of time was received. 13865 * the driver uses this time stamp to indicate if any received sequences have 13866 * timed out. 13867 **/ 13868 void 13869 lpfc_update_rcv_time_stamp(struct lpfc_vport *vport) 13870 { 13871 struct lpfc_dmabuf *h_buf; 13872 struct hbq_dmabuf *dmabuf = NULL; 13873 13874 /* get the oldest sequence on the rcv list */ 13875 h_buf = list_get_first(&vport->rcv_buffer_list, 13876 struct lpfc_dmabuf, list); 13877 if (!h_buf) 13878 return; 13879 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 13880 vport->rcv_buffer_time_stamp = dmabuf->time_stamp; 13881 } 13882 13883 /** 13884 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences. 13885 * @vport: The vport that the received sequences were sent to. 13886 * 13887 * This function cleans up all outstanding received sequences. This is called 13888 * by the driver when a link event or user action invalidates all the received 13889 * sequences. 
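 *
 * Illustrative call site (an assumed example, not lifted from this file):
 * a link-down or vport teardown path would typically call
 * lpfc_cleanup_rcv_buffers(vport) for each affected vport so that no stale
 * partial sequences survive the event.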
13890 **/ 13891 void 13892 lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport) 13893 { 13894 struct lpfc_dmabuf *h_buf, *hnext; 13895 struct lpfc_dmabuf *d_buf, *dnext; 13896 struct hbq_dmabuf *dmabuf = NULL; 13897 13898 /* start with the oldest sequence on the rcv list */ 13899 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) { 13900 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 13901 list_del_init(&dmabuf->hbuf.list); 13902 list_for_each_entry_safe(d_buf, dnext, 13903 &dmabuf->dbuf.list, list) { 13904 list_del_init(&d_buf->list); 13905 lpfc_in_buf_free(vport->phba, d_buf); 13906 } 13907 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf); 13908 } 13909 } 13910 13911 /** 13912 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences. 13913 * @vport: The vport that the received sequences were sent to. 13914 * 13915 * This function determines whether any received sequences have timed out by 13916 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp 13917 * indicates that there is at least one timed out sequence this routine will 13918 * go through the received sequences one at a time from most inactive to most 13919 * active to determine which ones need to be cleaned up. Once it has determined 13920 * that a sequence needs to be cleaned up it will simply free up the resources 13921 * without sending an abort. 13922 **/ 13923 void 13924 lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport) 13925 { 13926 struct lpfc_dmabuf *h_buf, *hnext; 13927 struct lpfc_dmabuf *d_buf, *dnext; 13928 struct hbq_dmabuf *dmabuf = NULL; 13929 unsigned long timeout; 13930 int abort_count = 0; 13931 13932 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) + 13933 vport->rcv_buffer_time_stamp); 13934 if (list_empty(&vport->rcv_buffer_list) || 13935 time_before(jiffies, timeout)) 13936 return; 13937 /* start with the oldest sequence on the rcv list */ 13938 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) { 13939 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 13940 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) + 13941 dmabuf->time_stamp); 13942 if (time_before(jiffies, timeout)) 13943 break; 13944 abort_count++; 13945 list_del_init(&dmabuf->hbuf.list); 13946 list_for_each_entry_safe(d_buf, dnext, 13947 &dmabuf->dbuf.list, list) { 13948 list_del_init(&d_buf->list); 13949 lpfc_in_buf_free(vport->phba, d_buf); 13950 } 13951 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf); 13952 } 13953 if (abort_count) 13954 lpfc_update_rcv_time_stamp(vport); 13955 } 13956 13957 /** 13958 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences 13959 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame 13960 * 13961 * This function searches through the existing incomplete sequences that have 13962 * been sent to this @vport. If the frame matches one of the incomplete 13963 * sequences then the dbuf in the @dmabuf is added to the list of frames that 13964 * make up that sequence. If no sequence is found that matches this frame then 13965 * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list 13966 * This function returns a pointer to the first dmabuf in the sequence list that 13967 * the frame was linked to. 
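 *
 * For reference, a frame is matched to a pending sequence only when its
 * SEQ_ID, OX_ID and S_ID all equal those of the header buffer at the head
 * of that sequence (the fh_seq_id, fh_ox_id and fh_s_id checks below).
 * Frames within a sequence are then kept ordered by their big-endian
 * SEQ_CNT so that lpfc_seq_complete() can detect holes.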
13968 **/
13969 static struct hbq_dmabuf *
13970 lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
13971 {
13972 struct fc_frame_header *new_hdr;
13973 struct fc_frame_header *temp_hdr;
13974 struct lpfc_dmabuf *d_buf;
13975 struct lpfc_dmabuf *h_buf;
13976 struct hbq_dmabuf *seq_dmabuf = NULL;
13977 struct hbq_dmabuf *temp_dmabuf = NULL;
13978
13979 INIT_LIST_HEAD(&dmabuf->dbuf.list);
13980 dmabuf->time_stamp = jiffies;
13981 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
13982 /* Use the hdr_buf to find the sequence that this frame belongs to */
13983 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
13984 temp_hdr = (struct fc_frame_header *)h_buf->virt;
13985 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
13986 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
13987 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
13988 continue;
13989 /* found a pending sequence that matches this frame */
13990 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
13991 break;
13992 }
13993 if (!seq_dmabuf) {
13994 /*
13995 * This indicates first frame received for this sequence.
13996 * Queue the buffer on the vport's rcv_buffer_list.
13997 */
13998 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
13999 lpfc_update_rcv_time_stamp(vport);
14000 return dmabuf;
14001 }
14002 temp_hdr = seq_dmabuf->hbuf.virt;
14003 if (be16_to_cpu(new_hdr->fh_seq_cnt) <
14004 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
14005 list_del_init(&seq_dmabuf->hbuf.list);
14006 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
14007 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
14008 lpfc_update_rcv_time_stamp(vport);
14009 return dmabuf;
14010 }
14011 /* move this sequence to the tail to indicate a young sequence */
14012 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
14013 seq_dmabuf->time_stamp = jiffies;
14014 lpfc_update_rcv_time_stamp(vport);
14015 if (list_empty(&seq_dmabuf->dbuf.list)) {
14016 temp_hdr = dmabuf->hbuf.virt;
14017 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
14018 return seq_dmabuf;
14019 }
14020 /* find the correct place in the sequence to insert this frame */
14021 list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) {
14022 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
14023 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
14024 /*
14025 * If the frame's sequence count is greater than the frame on
14026 * the list then insert the frame right after this frame
14027 */
14028 if (be16_to_cpu(new_hdr->fh_seq_cnt) >
14029 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
14030 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
14031 return seq_dmabuf;
14032 }
14033 }
14034 return NULL;
14035 }
14036
14037 /**
14038 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
14039 * @vport: pointer to a virtual port
14040 * @dmabuf: pointer to a dmabuf that describes the FC sequence
14041 *
14042 * This function tries to abort the partially assembled sequence described
14043 * by the information in the basic abort @dmabuf. It checks whether such a
14044 * partially assembled sequence is held by the driver. If so, it shall free up
14045 * all the frames from the partially assembled sequence.
14046 * 14047 * Return 14048 * true -- if there is matching partially assembled sequence present and all 14049 * the frames freed with the sequence; 14050 * false -- if there is no matching partially assembled sequence present so 14051 * nothing got aborted in the lower layer driver 14052 **/ 14053 static bool 14054 lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport, 14055 struct hbq_dmabuf *dmabuf) 14056 { 14057 struct fc_frame_header *new_hdr; 14058 struct fc_frame_header *temp_hdr; 14059 struct lpfc_dmabuf *d_buf, *n_buf, *h_buf; 14060 struct hbq_dmabuf *seq_dmabuf = NULL; 14061 14062 /* Use the hdr_buf to find the sequence that matches this frame */ 14063 INIT_LIST_HEAD(&dmabuf->dbuf.list); 14064 INIT_LIST_HEAD(&dmabuf->hbuf.list); 14065 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 14066 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) { 14067 temp_hdr = (struct fc_frame_header *)h_buf->virt; 14068 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) || 14069 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) || 14070 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3))) 14071 continue; 14072 /* found a pending sequence that matches this frame */ 14073 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 14074 break; 14075 } 14076 14077 /* Free up all the frames from the partially assembled sequence */ 14078 if (seq_dmabuf) { 14079 list_for_each_entry_safe(d_buf, n_buf, 14080 &seq_dmabuf->dbuf.list, list) { 14081 list_del_init(&d_buf->list); 14082 lpfc_in_buf_free(vport->phba, d_buf); 14083 } 14084 return true; 14085 } 14086 return false; 14087 } 14088 14089 /** 14090 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler 14091 * @phba: Pointer to HBA context object. 14092 * @cmd_iocbq: pointer to the command iocbq structure. 14093 * @rsp_iocbq: pointer to the response iocbq structure. 14094 * 14095 * This function handles the sequence abort response iocb command complete 14096 * event. It properly releases the memory allocated to the sequence abort 14097 * accept iocb. 14098 **/ 14099 static void 14100 lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba, 14101 struct lpfc_iocbq *cmd_iocbq, 14102 struct lpfc_iocbq *rsp_iocbq) 14103 { 14104 if (cmd_iocbq) 14105 lpfc_sli_release_iocbq(phba, cmd_iocbq); 14106 14107 /* Failure means BLS ABORT RSP did not get delivered to remote node*/ 14108 if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus) 14109 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14110 "3154 BLS ABORT RSP failed, data: x%x/x%x\n", 14111 rsp_iocbq->iocb.ulpStatus, 14112 rsp_iocbq->iocb.un.ulpWord[4]); 14113 } 14114 14115 /** 14116 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver. 14117 * @phba: Pointer to HBA context object. 14118 * @xri: xri id in transaction. 14119 * 14120 * This function validates the xri maps to the known range of XRIs allocated an 14121 * used by the driver. 14122 **/ 14123 uint16_t 14124 lpfc_sli4_xri_inrange(struct lpfc_hba *phba, 14125 uint16_t xri) 14126 { 14127 int i; 14128 14129 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) { 14130 if (xri == phba->sli4_hba.xri_ids[i]) 14131 return i; 14132 } 14133 return NO_XRI; 14134 } 14135 14136 14137 /** 14138 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort 14139 * @phba: Pointer to HBA context object. 14140 * @fc_hdr: pointer to a FC frame header. 14141 * 14142 * This function sends a basic response to a previous unsol sequence abort 14143 * event after aborting the sequence handling. 
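 *
 * The response is normally a BA_ACC. When the OX_ID from the ABTS does not
 * map to an exchange this driver still owns (see the range check against
 * the port's XRI configuration below), the IOCB is overridden to carry a
 * BA_RJT with reason code FC_BA_RJT_UNABLE and reason explanation
 * FC_BA_RJT_INV_XID instead.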
14144 **/ 14145 static void 14146 lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba, 14147 struct fc_frame_header *fc_hdr) 14148 { 14149 struct lpfc_iocbq *ctiocb = NULL; 14150 struct lpfc_nodelist *ndlp; 14151 uint16_t oxid, rxid; 14152 uint32_t sid, fctl; 14153 IOCB_t *icmd; 14154 int rc; 14155 14156 if (!lpfc_is_link_up(phba)) 14157 return; 14158 14159 sid = sli4_sid_from_fc_hdr(fc_hdr); 14160 oxid = be16_to_cpu(fc_hdr->fh_ox_id); 14161 rxid = be16_to_cpu(fc_hdr->fh_rx_id); 14162 14163 ndlp = lpfc_findnode_did(phba->pport, sid); 14164 if (!ndlp) { 14165 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, 14166 "1268 Find ndlp returned NULL for oxid:x%x " 14167 "SID:x%x\n", oxid, sid); 14168 return; 14169 } 14170 if (lpfc_sli4_xri_inrange(phba, rxid)) 14171 lpfc_set_rrq_active(phba, ndlp, rxid, oxid, 0); 14172 14173 /* Allocate buffer for rsp iocb */ 14174 ctiocb = lpfc_sli_get_iocbq(phba); 14175 if (!ctiocb) 14176 return; 14177 14178 /* Extract the F_CTL field from FC_HDR */ 14179 fctl = sli4_fctl_from_fc_hdr(fc_hdr); 14180 14181 icmd = &ctiocb->iocb; 14182 icmd->un.xseq64.bdl.bdeSize = 0; 14183 icmd->un.xseq64.bdl.ulpIoTag32 = 0; 14184 icmd->un.xseq64.w5.hcsw.Dfctl = 0; 14185 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC; 14186 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS; 14187 14188 /* Fill in the rest of iocb fields */ 14189 icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX; 14190 icmd->ulpBdeCount = 0; 14191 icmd->ulpLe = 1; 14192 icmd->ulpClass = CLASS3; 14193 icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; 14194 ctiocb->context1 = ndlp; 14195 14196 ctiocb->iocb_cmpl = NULL; 14197 ctiocb->vport = phba->pport; 14198 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl; 14199 ctiocb->sli4_lxritag = NO_XRI; 14200 ctiocb->sli4_xritag = NO_XRI; 14201 14202 /* If the oxid maps to the FCP XRI range or if it is out of range, 14203 * send a BLS_RJT. The driver no longer has that exchange. 14204 * Override the IOCB for a BA_RJT. 14205 */ 14206 if (oxid > (phba->sli4_hba.max_cfg_param.max_xri + 14207 phba->sli4_hba.max_cfg_param.xri_base) || 14208 oxid > (lpfc_sli4_get_els_iocb_cnt(phba) + 14209 phba->sli4_hba.max_cfg_param.xri_base)) { 14210 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT; 14211 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0); 14212 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID); 14213 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE); 14214 } 14215 14216 if (fctl & FC_FC_EX_CTX) { 14217 /* ABTS sent by responder to CT exchange, construction 14218 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG 14219 * field and RX_ID from ABTS for RX_ID field. 14220 */ 14221 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP); 14222 } else { 14223 /* ABTS sent by initiator to CT exchange, construction 14224 * of BA_ACC will need to allocate a new XRI as for the 14225 * XRI_TAG field. 
14226 */
14227 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
14228 }
14229 bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
14230 bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
14231
14232 /* Xmit CT abts response on exchange <xid> */
14233 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
14234 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
14235 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
14236
14237 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
14238 if (rc == IOCB_ERROR) {
14239 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
14240 "2925 Failed to issue CT ABTS RSP x%x on "
14241 "xri x%x, Data x%x\n",
14242 icmd->un.xseq64.w5.hcsw.Rctl, oxid,
14243 phba->link_state);
14244 lpfc_sli_release_iocbq(phba, ctiocb);
14245 }
14246 }
14247
14248 /**
14249 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
14250 * @vport: Pointer to the vport on which this sequence was received
14251 * @dmabuf: pointer to a dmabuf that describes the FC sequence
14252 *
14253 * This function handles an SLI-4 unsolicited abort event. If the unsolicited
14254 * receive sequence is only partially assembled by the driver, it shall abort
14255 * the partially assembled frames for the sequence. Otherwise, if the
14256 * unsolicited receive sequence has been completely assembled and passed to
14257 * the Upper Layer Protocol (ULP), it marks the per-OX_ID status to show that
14258 * the unsolicited sequence has been aborted. After that, it will issue a
14259 * basic accept (BA_ACC) for the abort.
14260 **/
14261 void
14262 lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
14263 struct hbq_dmabuf *dmabuf)
14264 {
14265 struct lpfc_hba *phba = vport->phba;
14266 struct fc_frame_header fc_hdr;
14267 uint32_t fctl;
14268 bool abts_par;
14269
14270 /* Make a copy of fc_hdr before the dmabuf being released */
14271 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
14272 fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
14273
14274 if (fctl & FC_FC_EX_CTX) {
14275 /*
14276 * ABTS sent by responder to exchange, just free the buffer
14277 */
14278 lpfc_in_buf_free(phba, &dmabuf->dbuf);
14279 } else {
14280 /*
14281 * ABTS sent by initiator to exchange, need to do cleanup
14282 */
14283 /* Try to abort partially assembled seq */
14284 abts_par = lpfc_sli4_abort_partial_seq(vport, dmabuf);
14285
14286 /* Send the sequence to the ULP if no partial sequence was aborted */
14287 if (abts_par == false)
14288 lpfc_sli4_send_seq_to_ulp(vport, dmabuf);
14289 else
14290 lpfc_in_buf_free(phba, &dmabuf->dbuf);
14291 }
14292 /* Send basic accept (BA_ACC) to the abort requester */
14293 lpfc_sli4_seq_abort_rsp(phba, &fc_hdr);
14294 }
14295
14296 /**
14297 * lpfc_seq_complete - Indicates if a sequence is complete
14298 * @dmabuf: pointer to a dmabuf that describes the FC sequence
14299 *
14300 * This function checks the sequence, starting with the frame described by
14301 * @dmabuf, to see if all the frames associated with this sequence are present.
14302 * The frames associated with this sequence are linked to the @dmabuf using the
14303 * dbuf list. This function looks for three things: 1) that the first frame
14304 * has a sequence count of zero; 2) that there is a frame with the last frame
14305 * of the sequence bit set; 3) that there are no holes in the sequence count.
14306 * The function will return 1 when the sequence is complete, otherwise 0.
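 *
 * Worked example (illustrative): a sequence received as three frames with
 * SEQ_CNT 0, 1 and 2, where the SEQ_CNT 2 frame has the End_Sequence bit
 * set in F_CTL, is complete and returns 1. The same frames with only
 * SEQ_CNT 0 and 2 present (a hole at 1), or with no frame carrying
 * End_Sequence, return 0.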
14307 **/ 14308 static int 14309 lpfc_seq_complete(struct hbq_dmabuf *dmabuf) 14310 { 14311 struct fc_frame_header *hdr; 14312 struct lpfc_dmabuf *d_buf; 14313 struct hbq_dmabuf *seq_dmabuf; 14314 uint32_t fctl; 14315 int seq_count = 0; 14316 14317 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 14318 /* make sure first fame of sequence has a sequence count of zero */ 14319 if (hdr->fh_seq_cnt != seq_count) 14320 return 0; 14321 fctl = (hdr->fh_f_ctl[0] << 16 | 14322 hdr->fh_f_ctl[1] << 8 | 14323 hdr->fh_f_ctl[2]); 14324 /* If last frame of sequence we can return success. */ 14325 if (fctl & FC_FC_END_SEQ) 14326 return 1; 14327 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) { 14328 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); 14329 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 14330 /* If there is a hole in the sequence count then fail. */ 14331 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt)) 14332 return 0; 14333 fctl = (hdr->fh_f_ctl[0] << 16 | 14334 hdr->fh_f_ctl[1] << 8 | 14335 hdr->fh_f_ctl[2]); 14336 /* If last frame of sequence we can return success. */ 14337 if (fctl & FC_FC_END_SEQ) 14338 return 1; 14339 } 14340 return 0; 14341 } 14342 14343 /** 14344 * lpfc_prep_seq - Prep sequence for ULP processing 14345 * @vport: Pointer to the vport on which this sequence was received 14346 * @dmabuf: pointer to a dmabuf that describes the FC sequence 14347 * 14348 * This function takes a sequence, described by a list of frames, and creates 14349 * a list of iocbq structures to describe the sequence. This iocbq list will be 14350 * used to issue to the generic unsolicited sequence handler. This routine 14351 * returns a pointer to the first iocbq in the list. If the function is unable 14352 * to allocate an iocbq then it throw out the received frames that were not 14353 * able to be described and return a pointer to the first iocbq. If unable to 14354 * allocate any iocbqs (including the first) this function will return NULL. 14355 **/ 14356 static struct lpfc_iocbq * 14357 lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) 14358 { 14359 struct hbq_dmabuf *hbq_buf; 14360 struct lpfc_dmabuf *d_buf, *n_buf; 14361 struct lpfc_iocbq *first_iocbq, *iocbq; 14362 struct fc_frame_header *fc_hdr; 14363 uint32_t sid; 14364 uint32_t len, tot_len; 14365 struct ulp_bde64 *pbde; 14366 14367 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 14368 /* remove from receive buffer list */ 14369 list_del_init(&seq_dmabuf->hbuf.list); 14370 lpfc_update_rcv_time_stamp(vport); 14371 /* get the Remote Port's SID */ 14372 sid = sli4_sid_from_fc_hdr(fc_hdr); 14373 tot_len = 0; 14374 /* Get an iocbq struct to fill in. */ 14375 first_iocbq = lpfc_sli_get_iocbq(vport->phba); 14376 if (first_iocbq) { 14377 /* Initialize the first IOCB. */ 14378 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0; 14379 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS; 14380 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX; 14381 first_iocbq->iocb.ulpContext = NO_XRI; 14382 first_iocbq->iocb.unsli3.rcvsli3.ox_id = 14383 be16_to_cpu(fc_hdr->fh_ox_id); 14384 /* iocbq is prepped for internal consumption. Physical vpi. 
*/ 14385 first_iocbq->iocb.unsli3.rcvsli3.vpi = 14386 vport->phba->vpi_ids[vport->vpi]; 14387 /* put the first buffer into the first IOCBq */ 14388 first_iocbq->context2 = &seq_dmabuf->dbuf; 14389 first_iocbq->context3 = NULL; 14390 first_iocbq->iocb.ulpBdeCount = 1; 14391 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = 14392 LPFC_DATA_BUF_SIZE; 14393 first_iocbq->iocb.un.rcvels.remoteID = sid; 14394 tot_len = bf_get(lpfc_rcqe_length, 14395 &seq_dmabuf->cq_event.cqe.rcqe_cmpl); 14396 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len; 14397 } 14398 iocbq = first_iocbq; 14399 /* 14400 * Each IOCBq can have two Buffers assigned, so go through the list 14401 * of buffers for this sequence and save two buffers in each IOCBq 14402 */ 14403 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) { 14404 if (!iocbq) { 14405 lpfc_in_buf_free(vport->phba, d_buf); 14406 continue; 14407 } 14408 if (!iocbq->context3) { 14409 iocbq->context3 = d_buf; 14410 iocbq->iocb.ulpBdeCount++; 14411 pbde = (struct ulp_bde64 *) 14412 &iocbq->iocb.unsli3.sli3Words[4]; 14413 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE; 14414 14415 /* We need to get the size out of the right CQE */ 14416 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 14417 len = bf_get(lpfc_rcqe_length, 14418 &hbq_buf->cq_event.cqe.rcqe_cmpl); 14419 iocbq->iocb.unsli3.rcvsli3.acc_len += len; 14420 tot_len += len; 14421 } else { 14422 iocbq = lpfc_sli_get_iocbq(vport->phba); 14423 if (!iocbq) { 14424 if (first_iocbq) { 14425 first_iocbq->iocb.ulpStatus = 14426 IOSTAT_FCP_RSP_ERROR; 14427 first_iocbq->iocb.un.ulpWord[4] = 14428 IOERR_NO_RESOURCES; 14429 } 14430 lpfc_in_buf_free(vport->phba, d_buf); 14431 continue; 14432 } 14433 iocbq->context2 = d_buf; 14434 iocbq->context3 = NULL; 14435 iocbq->iocb.ulpBdeCount = 1; 14436 iocbq->iocb.un.cont64[0].tus.f.bdeSize = 14437 LPFC_DATA_BUF_SIZE; 14438 14439 /* We need to get the size out of the right CQE */ 14440 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 14441 len = bf_get(lpfc_rcqe_length, 14442 &hbq_buf->cq_event.cqe.rcqe_cmpl); 14443 tot_len += len; 14444 iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len; 14445 14446 iocbq->iocb.un.rcvels.remoteID = sid; 14447 list_add_tail(&iocbq->list, &first_iocbq->list); 14448 } 14449 } 14450 return first_iocbq; 14451 } 14452 14453 static void 14454 lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport, 14455 struct hbq_dmabuf *seq_dmabuf) 14456 { 14457 struct fc_frame_header *fc_hdr; 14458 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb; 14459 struct lpfc_hba *phba = vport->phba; 14460 14461 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 14462 iocbq = lpfc_prep_seq(vport, seq_dmabuf); 14463 if (!iocbq) { 14464 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14465 "2707 Ring %d handler: Failed to allocate " 14466 "iocb Rctl x%x Type x%x received\n", 14467 LPFC_ELS_RING, 14468 fc_hdr->fh_r_ctl, fc_hdr->fh_type); 14469 return; 14470 } 14471 if (!lpfc_complete_unsol_iocb(phba, 14472 &phba->sli.ring[LPFC_ELS_RING], 14473 iocbq, fc_hdr->fh_r_ctl, 14474 fc_hdr->fh_type)) 14475 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14476 "2540 Ring %d handler: unexpected Rctl " 14477 "x%x Type x%x received\n", 14478 LPFC_ELS_RING, 14479 fc_hdr->fh_r_ctl, fc_hdr->fh_type); 14480 14481 /* Free iocb created in lpfc_prep_seq */ 14482 list_for_each_entry_safe(curr_iocb, next_iocb, 14483 &iocbq->list, list) { 14484 list_del_init(&curr_iocb->list); 14485 lpfc_sli_release_iocbq(phba, curr_iocb); 14486 } 14487 lpfc_sli_release_iocbq(phba, iocbq); 14488 } 14489 14490 /** 
14491 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware 14492 * @phba: Pointer to HBA context object. 14493 * 14494 * This function is called with no lock held. This function processes all 14495 * the received buffers and gives it to upper layers when a received buffer 14496 * indicates that it is the final frame in the sequence. The interrupt 14497 * service routine processes received buffers at interrupt contexts and adds 14498 * received dma buffers to the rb_pend_list queue and signals the worker thread. 14499 * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the 14500 * appropriate receive function when the final frame in a sequence is received. 14501 **/ 14502 void 14503 lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba, 14504 struct hbq_dmabuf *dmabuf) 14505 { 14506 struct hbq_dmabuf *seq_dmabuf; 14507 struct fc_frame_header *fc_hdr; 14508 struct lpfc_vport *vport; 14509 uint32_t fcfi; 14510 14511 /* Process each received buffer */ 14512 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 14513 /* check to see if this a valid type of frame */ 14514 if (lpfc_fc_frame_check(phba, fc_hdr)) { 14515 lpfc_in_buf_free(phba, &dmabuf->dbuf); 14516 return; 14517 } 14518 if ((bf_get(lpfc_cqe_code, 14519 &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1)) 14520 fcfi = bf_get(lpfc_rcqe_fcf_id_v1, 14521 &dmabuf->cq_event.cqe.rcqe_cmpl); 14522 else 14523 fcfi = bf_get(lpfc_rcqe_fcf_id, 14524 &dmabuf->cq_event.cqe.rcqe_cmpl); 14525 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi); 14526 if (!vport || !(vport->vpi_state & LPFC_VPI_REGISTERED)) { 14527 /* throw out the frame */ 14528 lpfc_in_buf_free(phba, &dmabuf->dbuf); 14529 return; 14530 } 14531 /* Handle the basic abort sequence (BA_ABTS) event */ 14532 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) { 14533 lpfc_sli4_handle_unsol_abort(vport, dmabuf); 14534 return; 14535 } 14536 14537 /* Link this frame */ 14538 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf); 14539 if (!seq_dmabuf) { 14540 /* unable to add frame to vport - throw it out */ 14541 lpfc_in_buf_free(phba, &dmabuf->dbuf); 14542 return; 14543 } 14544 /* If not last frame in sequence continue processing frames. */ 14545 if (!lpfc_seq_complete(seq_dmabuf)) 14546 return; 14547 14548 /* Send the complete sequence to the upper layer protocol */ 14549 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf); 14550 } 14551 14552 /** 14553 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port 14554 * @phba: pointer to lpfc hba data structure. 14555 * 14556 * This routine is invoked to post rpi header templates to the 14557 * HBA consistent with the SLI-4 interface spec. This routine 14558 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to 14559 * SLI4_PAGE_SIZE modulo 64 rpi context headers. 14560 * 14561 * This routine does not require any locks. It's usage is expected 14562 * to be driver load or reset recovery when the driver is 14563 * sequential. 14564 * 14565 * Return codes 14566 * 0 - successful 14567 * -EIO - The mailbox failed to complete successfully. 14568 * When this error occurs, the driver is not guaranteed 14569 * to have any rpi regions posted to the device and 14570 * must either attempt to repost the regions or take a 14571 * fatal error. 14572 **/ 14573 int 14574 lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba) 14575 { 14576 struct lpfc_rpi_hdr *rpi_page; 14577 uint32_t rc = 0; 14578 uint16_t lrpi = 0; 14579 14580 /* SLI4 ports that support extents do not require RPI headers. 
*/ 14581 if (!phba->sli4_hba.rpi_hdrs_in_use) 14582 goto exit; 14583 if (phba->sli4_hba.extents_in_use) 14584 return -EIO; 14585 14586 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 14587 /* 14588 * Assign the rpi headers a physical rpi only if the driver 14589 * has not initialized those resources. A port reset only 14590 * needs the headers posted. 14591 */ 14592 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) != 14593 LPFC_RPI_RSRC_RDY) 14594 rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi]; 14595 14596 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page); 14597 if (rc != MBX_SUCCESS) { 14598 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14599 "2008 Error %d posting all rpi " 14600 "headers\n", rc); 14601 rc = -EIO; 14602 break; 14603 } 14604 } 14605 14606 exit: 14607 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 14608 LPFC_RPI_RSRC_RDY); 14609 return rc; 14610 } 14611 14612 /** 14613 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port 14614 * @phba: pointer to lpfc hba data structure. 14615 * @rpi_page: pointer to the rpi memory region. 14616 * 14617 * This routine is invoked to post a single rpi header to the 14618 * HBA consistent with the SLI-4 interface spec. This memory region 14619 * maps up to 64 rpi context regions. 14620 * 14621 * Return codes 14622 * 0 - successful 14623 * -ENOMEM - No available memory 14624 * -EIO - The mailbox failed to complete successfully. 14625 **/ 14626 int 14627 lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page) 14628 { 14629 LPFC_MBOXQ_t *mboxq; 14630 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl; 14631 uint32_t rc = 0; 14632 uint32_t shdr_status, shdr_add_status; 14633 union lpfc_sli4_cfg_shdr *shdr; 14634 14635 /* SLI4 ports that support extents do not require RPI headers. */ 14636 if (!phba->sli4_hba.rpi_hdrs_in_use) 14637 return rc; 14638 if (phba->sli4_hba.extents_in_use) 14639 return -EIO; 14640 14641 /* The port is notified of the header region via a mailbox command. */ 14642 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14643 if (!mboxq) { 14644 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14645 "2001 Unable to allocate memory for issuing " 14646 "SLI_CONFIG_SPECIAL mailbox command\n"); 14647 return -ENOMEM; 14648 } 14649 14650 /* Post all rpi memory regions to the port. */ 14651 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl; 14652 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, 14653 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE, 14654 sizeof(struct lpfc_mbx_post_hdr_tmpl) - 14655 sizeof(struct lpfc_sli4_cfg_mhdr), 14656 LPFC_SLI4_MBX_EMBED); 14657 14658 14659 /* Post the physical rpi to the port for this rpi header. 
*/ 14660 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl, 14661 rpi_page->start_rpi); 14662 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt, 14663 hdr_tmpl, rpi_page->page_count); 14664 14665 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys); 14666 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys); 14667 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 14668 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr; 14669 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 14670 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 14671 if (rc != MBX_TIMEOUT) 14672 mempool_free(mboxq, phba->mbox_mem_pool); 14673 if (shdr_status || shdr_add_status || rc) { 14674 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14675 "2514 POST_RPI_HDR mailbox failed with " 14676 "status x%x add_status x%x, mbx status x%x\n", 14677 shdr_status, shdr_add_status, rc); 14678 rc = -ENXIO; 14679 } 14680 return rc; 14681 } 14682 14683 /** 14684 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range 14685 * @phba: pointer to lpfc hba data structure. 14686 * 14687 * This routine is invoked to post rpi header templates to the 14688 * HBA consistent with the SLI-4 interface spec. This routine 14689 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to 14690 * SLI4_PAGE_SIZE modulo 64 rpi context headers. 14691 * 14692 * Returns 14693 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful 14694 * LPFC_RPI_ALLOC_ERROR if no rpis are available. 14695 **/ 14696 int 14697 lpfc_sli4_alloc_rpi(struct lpfc_hba *phba) 14698 { 14699 unsigned long rpi; 14700 uint16_t max_rpi, rpi_limit; 14701 uint16_t rpi_remaining, lrpi = 0; 14702 struct lpfc_rpi_hdr *rpi_hdr; 14703 14704 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi; 14705 rpi_limit = phba->sli4_hba.next_rpi; 14706 14707 /* 14708 * Fetch the next logical rpi. Because this index is logical, 14709 * the driver starts at 0 each time. 14710 */ 14711 spin_lock_irq(&phba->hbalock); 14712 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0); 14713 if (rpi >= rpi_limit) 14714 rpi = LPFC_RPI_ALLOC_ERROR; 14715 else { 14716 set_bit(rpi, phba->sli4_hba.rpi_bmask); 14717 phba->sli4_hba.max_cfg_param.rpi_used++; 14718 phba->sli4_hba.rpi_count++; 14719 } 14720 14721 /* 14722 * Don't try to allocate more rpi header regions if the device limit 14723 * has been exhausted. 14724 */ 14725 if ((rpi == LPFC_RPI_ALLOC_ERROR) && 14726 (phba->sli4_hba.rpi_count >= max_rpi)) { 14727 spin_unlock_irq(&phba->hbalock); 14728 return rpi; 14729 } 14730 14731 /* 14732 * RPI header postings are not required for SLI4 ports capable of 14733 * extents. 14734 */ 14735 if (!phba->sli4_hba.rpi_hdrs_in_use) { 14736 spin_unlock_irq(&phba->hbalock); 14737 return rpi; 14738 } 14739 14740 /* 14741 * If the driver is running low on rpi resources, allocate another 14742 * page now. Note that the next_rpi value is used because 14743 * it represents how many are actually in use whereas max_rpi notes 14744 * how many are supported max by the device. 
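 * For example (the numbers are illustrative only): with next_rpi at 64 and
 * rpi_count at 60 after this allocation, rpi_remaining is 4; if that falls
 * below LPFC_RPI_LOW_WATER_MARK, another rpi header page is created and
 * posted below so that later allocations do not stall.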
14745 */
14746 rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
14747 spin_unlock_irq(&phba->hbalock);
14748 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
14749 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
14750 if (!rpi_hdr) {
14751 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14752 "2002 Error Could not grow rpi "
14753 "count\n");
14754 } else {
14755 lrpi = rpi_hdr->start_rpi;
14756 rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
14757 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
14758 }
14759 }
14760
14761 return rpi;
14762 }
14763
14764 /**
14765 * __lpfc_sli4_free_rpi - Release an rpi for reuse.
14766 * @phba: pointer to lpfc hba data structure.
14767 * @rpi: rpi to release.
14768 * This routine is invoked to release an rpi to the pool of
14769 * available rpis maintained by the driver.
14770 **/
14771 void
14772 __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
14773 {
14774 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
14775 phba->sli4_hba.rpi_count--;
14776 phba->sli4_hba.max_cfg_param.rpi_used--;
14777 }
14778 }
14779
14780 /**
14781 * lpfc_sli4_free_rpi - Release an rpi for reuse.
14782 * @phba: pointer to lpfc hba data structure.
14783 * @rpi: rpi to release.
14784 * This routine is invoked to release an rpi to the pool of
14785 * available rpis maintained by the driver.
14786 **/
14787 void
14788 lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
14789 {
14790 spin_lock_irq(&phba->hbalock);
14791 __lpfc_sli4_free_rpi(phba, rpi);
14792 spin_unlock_irq(&phba->hbalock);
14793 }
14794
14795 /**
14796 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
14797 * @phba: pointer to lpfc hba data structure.
14798 *
14799 * This routine is invoked to remove the memory region that
14800 * provided rpi via a bitmask.
14801 **/
14802 void
14803 lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
14804 {
14805 kfree(phba->sli4_hba.rpi_bmask);
14806 kfree(phba->sli4_hba.rpi_ids);
14807 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
14808 }
14809
14810 /**
14811 * lpfc_sli4_resume_rpi - Resume an rpi on the port
14812 * @ndlp: pointer to the node whose rpi is being resumed.
14813 * @cmpl: optional mailbox completion handler.
14814 * @arg: completion context passed back to @cmpl.
14815 * This routine issues a RESUME_RPI mailbox command for the rpi of @ndlp.
14816 **/
14817 int
14818 lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
14819 void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
14820 {
14821 LPFC_MBOXQ_t *mboxq;
14822 struct lpfc_hba *phba = ndlp->phba;
14823 int rc;
14824
14825 /* The port is notified of the rpi resume via a mailbox command. */
14826 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14827 if (!mboxq)
14828 return -ENOMEM;
14829
14830 /* Construct the RESUME_RPI mailbox command for this node's rpi. */
14831 lpfc_resume_rpi(mboxq, ndlp);
14832 if (cmpl) {
14833 mboxq->mbox_cmpl = cmpl;
14834 mboxq->context1 = arg;
14835 mboxq->context2 = ndlp;
14836 } else
14837 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
14838 mboxq->vport = ndlp->vport;
14839 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
14840 if (rc == MBX_NOT_FINISHED) {
14841 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14842 "2010 Resume RPI Mailbox failed "
14843 "status %d, mbxStatus x%x\n", rc,
14844 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
14845 mempool_free(mboxq, phba->mbox_mem_pool);
14846 return -EIO;
14847 }
14848 return 0;
14849 }
14850
14851 /**
14852 * lpfc_sli4_init_vpi - Initialize a vpi with the port
14853 * @vport: Pointer to the vport for which the vpi is being initialized
14854 *
14855 * This routine is invoked to activate a vpi with the port.
14856 * 14857 * Returns: 14858 * 0 success 14859 * -Evalue otherwise 14860 **/ 14861 int 14862 lpfc_sli4_init_vpi(struct lpfc_vport *vport) 14863 { 14864 LPFC_MBOXQ_t *mboxq; 14865 int rc = 0; 14866 int retval = MBX_SUCCESS; 14867 uint32_t mbox_tmo; 14868 struct lpfc_hba *phba = vport->phba; 14869 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14870 if (!mboxq) 14871 return -ENOMEM; 14872 lpfc_init_vpi(phba, mboxq, vport->vpi); 14873 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); 14874 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 14875 if (rc != MBX_SUCCESS) { 14876 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI, 14877 "2022 INIT VPI Mailbox failed " 14878 "status %d, mbxStatus x%x\n", rc, 14879 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 14880 retval = -EIO; 14881 } 14882 if (rc != MBX_TIMEOUT) 14883 mempool_free(mboxq, vport->phba->mbox_mem_pool); 14884 14885 return retval; 14886 } 14887 14888 /** 14889 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler. 14890 * @phba: pointer to lpfc hba data structure. 14891 * @mboxq: Pointer to mailbox object. 14892 * 14893 * This routine is invoked to manually add a single FCF record. The caller 14894 * must pass a completely initialized FCF_Record. This routine takes 14895 * care of the nonembedded mailbox operations. 14896 **/ 14897 static void 14898 lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 14899 { 14900 void *virt_addr; 14901 union lpfc_sli4_cfg_shdr *shdr; 14902 uint32_t shdr_status, shdr_add_status; 14903 14904 virt_addr = mboxq->sge_array->addr[0]; 14905 /* The IOCTL status is embedded in the mailbox subheader. */ 14906 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr; 14907 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 14908 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 14909 14910 if ((shdr_status || shdr_add_status) && 14911 (shdr_status != STATUS_FCF_IN_USE)) 14912 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14913 "2558 ADD_FCF_RECORD mailbox failed with " 14914 "status x%x add_status x%x\n", 14915 shdr_status, shdr_add_status); 14916 14917 lpfc_sli4_mbox_cmd_free(phba, mboxq); 14918 } 14919 14920 /** 14921 * lpfc_sli4_add_fcf_record - Manually add an FCF Record. 14922 * @phba: pointer to lpfc hba data structure. 14923 * @fcf_record: pointer to the initialized fcf record to add. 14924 * 14925 * This routine is invoked to manually add a single FCF record. The caller 14926 * must pass a completely initialized FCF_Record. This routine takes 14927 * care of the nonembedded mailbox operations. 
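 *
 * Illustrative usage sketch (assumed caller, not code from this file):
 *
 *	struct fcf_record fcf_rec;
 *	int rc;
 *
 *	lpfc_sli4_build_dflt_fcf_record(phba, &fcf_rec, fcf_index);
 *	rc = lpfc_sli4_add_fcf_record(phba, &fcf_rec);
 *
 * where fcf_index would typically be the driver's default FCF index.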
14928 **/ 14929 int 14930 lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record) 14931 { 14932 int rc = 0; 14933 LPFC_MBOXQ_t *mboxq; 14934 uint8_t *bytep; 14935 void *virt_addr; 14936 dma_addr_t phys_addr; 14937 struct lpfc_mbx_sge sge; 14938 uint32_t alloc_len, req_len; 14939 uint32_t fcfindex; 14940 14941 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14942 if (!mboxq) { 14943 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14944 "2009 Failed to allocate mbox for ADD_FCF cmd\n"); 14945 return -ENOMEM; 14946 } 14947 14948 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) + 14949 sizeof(uint32_t); 14950 14951 /* Allocate DMA memory and set up the non-embedded mailbox command */ 14952 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, 14953 LPFC_MBOX_OPCODE_FCOE_ADD_FCF, 14954 req_len, LPFC_SLI4_MBX_NEMBED); 14955 if (alloc_len < req_len) { 14956 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14957 "2523 Allocated DMA memory size (x%x) is " 14958 "less than the requested DMA memory " 14959 "size (x%x)\n", alloc_len, req_len); 14960 lpfc_sli4_mbox_cmd_free(phba, mboxq); 14961 return -ENOMEM; 14962 } 14963 14964 /* 14965 * Get the first SGE entry from the non-embedded DMA memory. This 14966 * routine only uses a single SGE. 14967 */ 14968 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge); 14969 phys_addr = getPaddr(sge.pa_hi, sge.pa_lo); 14970 virt_addr = mboxq->sge_array->addr[0]; 14971 /* 14972 * Configure the FCF record for FCFI 0. This is the driver's 14973 * hardcoded default and gets used in nonFIP mode. 14974 */ 14975 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record); 14976 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr); 14977 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t)); 14978 14979 /* 14980 * Copy the fcf_index and the FCF Record Data. The data starts after 14981 * the FCoE header plus word10. The data copy needs to be endian 14982 * correct. 14983 */ 14984 bytep += sizeof(uint32_t); 14985 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record)); 14986 mboxq->vport = phba->pport; 14987 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record; 14988 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 14989 if (rc == MBX_NOT_FINISHED) { 14990 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14991 "2515 ADD_FCF_RECORD mailbox failed with " 14992 "status 0x%x\n", rc); 14993 lpfc_sli4_mbox_cmd_free(phba, mboxq); 14994 rc = -EIO; 14995 } else 14996 rc = 0; 14997 14998 return rc; 14999 } 15000 15001 /** 15002 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record. 15003 * @phba: pointer to lpfc hba data structure. 15004 * @fcf_record: pointer to the fcf record to write the default data. 15005 * @fcf_index: FCF table entry index. 15006 * 15007 * This routine is invoked to build the driver's default FCF record. The 15008 * values used are hardcoded. This routine handles memory initialization. 
15009 * 15010 **/ 15011 void 15012 lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba, 15013 struct fcf_record *fcf_record, 15014 uint16_t fcf_index) 15015 { 15016 memset(fcf_record, 0, sizeof(struct fcf_record)); 15017 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE; 15018 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER; 15019 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY; 15020 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]); 15021 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]); 15022 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]); 15023 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3); 15024 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4); 15025 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5); 15026 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]); 15027 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]); 15028 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]); 15029 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1); 15030 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1); 15031 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index); 15032 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record, 15033 LPFC_FCF_FPMA | LPFC_FCF_SPMA); 15034 /* Set the VLAN bit map */ 15035 if (phba->valid_vlan) { 15036 fcf_record->vlan_bitmap[phba->vlan_id / 8] 15037 = 1 << (phba->vlan_id % 8); 15038 } 15039 } 15040 15041 /** 15042 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan. 15043 * @phba: pointer to lpfc hba data structure. 15044 * @fcf_index: FCF table entry offset. 15045 * 15046 * This routine is invoked to scan the entire FCF table by reading FCF 15047 * record and processing it one at a time starting from the @fcf_index 15048 * for initial FCF discovery or fast FCF failover rediscovery. 15049 * 15050 * Return 0 if the mailbox command is submitted successfully, none 0 15051 * otherwise. 
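 *
 * Illustrative use (an assumption about the typical caller): a full table
 * scan is started from the first entry, e.g.
 *
 *	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
 *
 * with further records requested from the completion handler
 * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec as the scan proceeds.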
15052 **/ 15053 int 15054 lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) 15055 { 15056 int rc = 0, error; 15057 LPFC_MBOXQ_t *mboxq; 15058 15059 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag; 15060 phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag; 15061 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15062 if (!mboxq) { 15063 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15064 "2000 Failed to allocate mbox for " 15065 "READ_FCF cmd\n"); 15066 error = -ENOMEM; 15067 goto fail_fcf_scan; 15068 } 15069 /* Construct the read FCF record mailbox command */ 15070 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); 15071 if (rc) { 15072 error = -EINVAL; 15073 goto fail_fcf_scan; 15074 } 15075 /* Issue the mailbox command asynchronously */ 15076 mboxq->vport = phba->pport; 15077 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec; 15078 15079 spin_lock_irq(&phba->hbalock); 15080 phba->hba_flag |= FCF_TS_INPROG; 15081 spin_unlock_irq(&phba->hbalock); 15082 15083 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 15084 if (rc == MBX_NOT_FINISHED) 15085 error = -EIO; 15086 else { 15087 /* Reset eligible FCF count for new scan */ 15088 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST) 15089 phba->fcf.eligible_fcf_cnt = 0; 15090 error = 0; 15091 } 15092 fail_fcf_scan: 15093 if (error) { 15094 if (mboxq) 15095 lpfc_sli4_mbox_cmd_free(phba, mboxq); 15096 /* FCF scan failed, clear FCF_TS_INPROG flag */ 15097 spin_lock_irq(&phba->hbalock); 15098 phba->hba_flag &= ~FCF_TS_INPROG; 15099 spin_unlock_irq(&phba->hbalock); 15100 } 15101 return error; 15102 } 15103 15104 /** 15105 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf. 15106 * @phba: pointer to lpfc hba data structure. 15107 * @fcf_index: FCF table entry offset. 15108 * 15109 * This routine is invoked to read an FCF record indicated by @fcf_index 15110 * and to use it for FLOGI roundrobin FCF failover. 15111 * 15112 * Return 0 if the mailbox command is submitted successfully, none 0 15113 * otherwise. 15114 **/ 15115 int 15116 lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) 15117 { 15118 int rc = 0, error; 15119 LPFC_MBOXQ_t *mboxq; 15120 15121 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15122 if (!mboxq) { 15123 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT, 15124 "2763 Failed to allocate mbox for " 15125 "READ_FCF cmd\n"); 15126 error = -ENOMEM; 15127 goto fail_fcf_read; 15128 } 15129 /* Construct the read FCF record mailbox command */ 15130 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); 15131 if (rc) { 15132 error = -EINVAL; 15133 goto fail_fcf_read; 15134 } 15135 /* Issue the mailbox command asynchronously */ 15136 mboxq->vport = phba->pport; 15137 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec; 15138 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 15139 if (rc == MBX_NOT_FINISHED) 15140 error = -EIO; 15141 else 15142 error = 0; 15143 15144 fail_fcf_read: 15145 if (error && mboxq) 15146 lpfc_sli4_mbox_cmd_free(phba, mboxq); 15147 return error; 15148 } 15149 15150 /** 15151 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask. 15152 * @phba: pointer to lpfc hba data structure. 15153 * @fcf_index: FCF table entry offset. 15154 * 15155 * This routine is invoked to read an FCF record indicated by @fcf_index to 15156 * determine whether it's eligible for FLOGI roundrobin failover list. 15157 * 15158 * Return 0 if the mailbox command is submitted successfully, none 0 15159 * otherwise. 
15160 **/
15161 int
15162 lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
15163 {
15164 	int rc = 0, error;
15165 	LPFC_MBOXQ_t *mboxq;
15166
15167 	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15168 	if (!mboxq) {
15169 		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
15170 				"2758 Failed to allocate mbox for "
15171 				"READ_FCF cmd\n");
15172 		error = -ENOMEM;
15173 		goto fail_fcf_read;
15174 	}
15175 	/* Construct the read FCF record mailbox command */
15176 	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
15177 	if (rc) {
15178 		error = -EINVAL;
15179 		goto fail_fcf_read;
15180 	}
15181 	/* Issue the mailbox command asynchronously */
15182 	mboxq->vport = phba->pport;
15183 	mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
15184 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
15185 	if (rc == MBX_NOT_FINISHED)
15186 		error = -EIO;
15187 	else
15188 		error = 0;
15189
15190 fail_fcf_read:
15191 	if (error && mboxq)
15192 		lpfc_sli4_mbox_cmd_free(phba, mboxq);
15193 	return error;
15194 }
15195
15196 /**
15197 * lpfc_check_next_fcf_pri_level - Repopulate rr_bmask from next priority
15198 * @phba: pointer to the lpfc_hba struct for this port.
15199 *
15200 * This routine is called from lpfc_sli4_fcf_rr_next_index_get when the
15201 * rr_bmask is empty. FCF indices are put into the rr_bmask based on their
15202 * priority level, starting from the highest priority down to the lowest;
15203 * the most likely FCF candidate will be in the highest priority group.
15204 * When called, this routine searches the fcf_pri list for the next lowest
15205 * priority group and repopulates the rr_bmask with only those fcf_indexes.
15206 * Returns:
15207 * 1=success 0=failure
15208 **/
15209 int
15210 lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
15211 {
15212 	uint16_t next_fcf_pri;
15213 	uint16_t last_index;
15214 	struct lpfc_fcf_pri *fcf_pri;
15215 	int rc;
15216 	int ret = 0;
15217
15218 	last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
15219 			LPFC_SLI4_FCF_TBL_INDX_MAX);
15220 	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
15221 			"3060 Last IDX %d\n", last_index);
15222 	if (list_empty(&phba->fcf.fcf_pri_list)) {
15223 		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
15224 			"3061 Last IDX %d\n", last_index);
15225 		return 0; /* Empty rr list */
15226 	}
15227 	next_fcf_pri = 0;
15228 	/*
15229 	 * Clear the rr_bmask and set all of the bits that are at this
15230 	 * priority.
15231 	 */
15232 	memset(phba->fcf.fcf_rr_bmask, 0,
15233 			sizeof(*phba->fcf.fcf_rr_bmask));
15234 	spin_lock_irq(&phba->hbalock);
15235 	list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
15236 		if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
15237 			continue;
15238 		/*
15239 		 * The first priority whose FLOGI has not failed
15240 		 * will be the highest.
15241 		 */
15242 		if (!next_fcf_pri)
15243 			next_fcf_pri = fcf_pri->fcf_rec.priority;
15244 		spin_unlock_irq(&phba->hbalock);
15245 		if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
15246 			rc = lpfc_sli4_fcf_rr_index_set(phba,
15247 					fcf_pri->fcf_rec.fcf_index);
15248 			if (rc)
15249 				return 0;
15250 		}
15251 		spin_lock_irq(&phba->hbalock);
15252 	}
15253 	/*
15254 	 * If next_fcf_pri was not set above and the list is not empty then
15255 	 * we have failed FLOGIs on all of them. So reset the FLOGI failed
15256 	 * flag and start at the beginning.
15257 	 */
15258 	if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
15259 		list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
15260 			fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
15261 			/*
15262 			 * The first priority whose FLOGI has not failed
15263 			 * will be the highest.
15264 */ 15265 if (!next_fcf_pri) 15266 next_fcf_pri = fcf_pri->fcf_rec.priority; 15267 spin_unlock_irq(&phba->hbalock); 15268 if (fcf_pri->fcf_rec.priority == next_fcf_pri) { 15269 rc = lpfc_sli4_fcf_rr_index_set(phba, 15270 fcf_pri->fcf_rec.fcf_index); 15271 if (rc) 15272 return 0; 15273 } 15274 spin_lock_irq(&phba->hbalock); 15275 } 15276 } else 15277 ret = 1; 15278 spin_unlock_irq(&phba->hbalock); 15279 15280 return ret; 15281 } 15282 /** 15283 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index 15284 * @phba: pointer to lpfc hba data structure. 15285 * 15286 * This routine is to get the next eligible FCF record index in a round 15287 * robin fashion. If the next eligible FCF record index equals to the 15288 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF) 15289 * shall be returned, otherwise, the next eligible FCF record's index 15290 * shall be returned. 15291 **/ 15292 uint16_t 15293 lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba) 15294 { 15295 uint16_t next_fcf_index; 15296 15297 /* Search start from next bit of currently registered FCF index */ 15298 next_priority: 15299 next_fcf_index = (phba->fcf.current_rec.fcf_indx + 1) % 15300 LPFC_SLI4_FCF_TBL_INDX_MAX; 15301 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, 15302 LPFC_SLI4_FCF_TBL_INDX_MAX, 15303 next_fcf_index); 15304 15305 /* Wrap around condition on phba->fcf.fcf_rr_bmask */ 15306 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 15307 /* 15308 * If we have wrapped then we need to clear the bits that 15309 * have been tested so that we can detect when we should 15310 * change the priority level. 15311 */ 15312 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, 15313 LPFC_SLI4_FCF_TBL_INDX_MAX, 0); 15314 } 15315 15316 15317 /* Check roundrobin failover list empty condition */ 15318 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX || 15319 next_fcf_index == phba->fcf.current_rec.fcf_indx) { 15320 /* 15321 * If next fcf index is not found check if there are lower 15322 * Priority level fcf's in the fcf_priority list. 15323 * Set up the rr_bmask with all of the avaiable fcf bits 15324 * at that level and continue the selection process. 15325 */ 15326 if (lpfc_check_next_fcf_pri_level(phba)) 15327 goto next_priority; 15328 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 15329 "2844 No roundrobin failover FCF available\n"); 15330 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) 15331 return LPFC_FCOE_FCF_NEXT_NONE; 15332 else { 15333 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 15334 "3063 Only FCF available idx %d, flag %x\n", 15335 next_fcf_index, 15336 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag); 15337 return next_fcf_index; 15338 } 15339 } 15340 15341 if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX && 15342 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag & 15343 LPFC_FCF_FLOGI_FAILED) 15344 goto next_priority; 15345 15346 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 15347 "2845 Get next roundrobin failover FCF (x%x)\n", 15348 next_fcf_index); 15349 15350 return next_fcf_index; 15351 } 15352 15353 /** 15354 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index 15355 * @phba: pointer to lpfc hba data structure. 15356 * 15357 * This routine sets the FCF record index in to the eligible bmask for 15358 * roundrobin failover search. It checks to make sure that the index 15359 * does not go beyond the range of the driver allocated bmask dimension 15360 * before setting the bit. 
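 *
 * Illustrative usage, mirroring lpfc_check_next_fcf_pri_level() above:
 *
 *	rc = lpfc_sli4_fcf_rr_index_set(phba, fcf_pri->fcf_rec.fcf_index);
 *	if (rc)
 *		return 0;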
15361 * 15362 * Returns 0 if the index bit successfully set, otherwise, it returns 15363 * -EINVAL. 15364 **/ 15365 int 15366 lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index) 15367 { 15368 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 15369 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 15370 "2610 FCF (x%x) reached driver's book " 15371 "keeping dimension:x%x\n", 15372 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); 15373 return -EINVAL; 15374 } 15375 /* Set the eligible FCF record index bmask */ 15376 set_bit(fcf_index, phba->fcf.fcf_rr_bmask); 15377 15378 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 15379 "2790 Set FCF (x%x) to roundrobin FCF failover " 15380 "bmask\n", fcf_index); 15381 15382 return 0; 15383 } 15384 15385 /** 15386 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index 15387 * @phba: pointer to lpfc hba data structure. 15388 * 15389 * This routine clears the FCF record index from the eligible bmask for 15390 * roundrobin failover search. It checks to make sure that the index 15391 * does not go beyond the range of the driver allocated bmask dimension 15392 * before clearing the bit. 15393 **/ 15394 void 15395 lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index) 15396 { 15397 struct lpfc_fcf_pri *fcf_pri; 15398 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 15399 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 15400 "2762 FCF (x%x) reached driver's book " 15401 "keeping dimension:x%x\n", 15402 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); 15403 return; 15404 } 15405 /* Clear the eligible FCF record index bmask */ 15406 spin_lock_irq(&phba->hbalock); 15407 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) { 15408 if (fcf_pri->fcf_rec.fcf_index == fcf_index) { 15409 list_del_init(&fcf_pri->list); 15410 break; 15411 } 15412 } 15413 spin_unlock_irq(&phba->hbalock); 15414 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask); 15415 15416 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 15417 "2791 Clear FCF (x%x) from roundrobin failover " 15418 "bmask\n", fcf_index); 15419 } 15420 15421 /** 15422 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table 15423 * @phba: pointer to lpfc hba data structure. 15424 * 15425 * This routine is the completion routine for the rediscover FCF table mailbox 15426 * command. If the mailbox command returned failure, it will try to stop the 15427 * FCF rediscover wait timer. 15428 **/ 15429 void 15430 lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) 15431 { 15432 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf; 15433 uint32_t shdr_status, shdr_add_status; 15434 15435 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl; 15436 15437 shdr_status = bf_get(lpfc_mbox_hdr_status, 15438 &redisc_fcf->header.cfg_shdr.response); 15439 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 15440 &redisc_fcf->header.cfg_shdr.response); 15441 if (shdr_status || shdr_add_status) { 15442 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 15443 "2746 Requesting for FCF rediscovery failed " 15444 "status x%x add_status x%x\n", 15445 shdr_status, shdr_add_status); 15446 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) { 15447 spin_lock_irq(&phba->hbalock); 15448 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC; 15449 spin_unlock_irq(&phba->hbalock); 15450 /* 15451 * CVL event triggered FCF rediscover request failed, 15452 * last resort to re-try current registered FCF entry. 
15453 */ 15454 lpfc_retry_pport_discovery(phba); 15455 } else { 15456 spin_lock_irq(&phba->hbalock); 15457 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; 15458 spin_unlock_irq(&phba->hbalock); 15459 /* 15460 * DEAD FCF event triggered FCF rediscover request 15461 * failed, last resort to fail over as a link down 15462 * to FCF registration. 15463 */ 15464 lpfc_sli4_fcf_dead_failthrough(phba); 15465 } 15466 } else { 15467 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 15468 "2775 Start FCF rediscover quiescent timer\n"); 15469 /* 15470 * Start FCF rediscovery wait timer for pending FCF 15471 * before rescan FCF record table. 15472 */ 15473 lpfc_fcf_redisc_wait_start_timer(phba); 15474 } 15475 15476 mempool_free(mbox, phba->mbox_mem_pool); 15477 } 15478 15479 /** 15480 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port. 15481 * @phba: pointer to lpfc hba data structure. 15482 * 15483 * This routine is invoked to request for rediscovery of the entire FCF table 15484 * by the port. 15485 **/ 15486 int 15487 lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba) 15488 { 15489 LPFC_MBOXQ_t *mbox; 15490 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf; 15491 int rc, length; 15492 15493 /* Cancel retry delay timers to all vports before FCF rediscover */ 15494 lpfc_cancel_all_vport_retry_delay_timer(phba); 15495 15496 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15497 if (!mbox) { 15498 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15499 "2745 Failed to allocate mbox for " 15500 "requesting FCF rediscover.\n"); 15501 return -ENOMEM; 15502 } 15503 15504 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) - 15505 sizeof(struct lpfc_sli4_cfg_mhdr)); 15506 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 15507 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF, 15508 length, LPFC_SLI4_MBX_EMBED); 15509 15510 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl; 15511 /* Set count to 0 for invalidating the entire FCF database */ 15512 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0); 15513 15514 /* Issue the mailbox command asynchronously */ 15515 mbox->vport = phba->pport; 15516 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table; 15517 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 15518 15519 if (rc == MBX_NOT_FINISHED) { 15520 mempool_free(mbox, phba->mbox_mem_pool); 15521 return -EIO; 15522 } 15523 return 0; 15524 } 15525 15526 /** 15527 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event 15528 * @phba: pointer to lpfc hba data structure. 15529 * 15530 * This function is the failover routine as a last resort to the FCF DEAD 15531 * event when driver failed to perform fast FCF failover. 15532 **/ 15533 void 15534 lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba) 15535 { 15536 uint32_t link_state; 15537 15538 /* 15539 * Last resort as FCF DEAD event failover will treat this as 15540 * a link down, but save the link state because we don't want 15541 * it to be changed to Link Down unless it is already down. 15542 */ 15543 link_state = phba->link_state; 15544 lpfc_linkdown(phba); 15545 phba->link_state = link_state; 15546 15547 /* Unregister FCF if no devices connected to it */ 15548 lpfc_unregister_unused_fcf(phba); 15549 } 15550 15551 /** 15552 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data. 15553 * @phba: pointer to lpfc hba data structure. 15554 * @rgn23_data: pointer to configure region 23 data. 15555 * 15556 * This function gets SLI3 port configure region 23 data through memory dump 15557 * mailbox command. 
When it successfully retrieves data, the size of the data 15558 * will be returned, otherwise, 0 will be returned. 15559 **/ 15560 static uint32_t 15561 lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data) 15562 { 15563 LPFC_MBOXQ_t *pmb = NULL; 15564 MAILBOX_t *mb; 15565 uint32_t offset = 0; 15566 int rc; 15567 15568 if (!rgn23_data) 15569 return 0; 15570 15571 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15572 if (!pmb) { 15573 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15574 "2600 failed to allocate mailbox memory\n"); 15575 return 0; 15576 } 15577 mb = &pmb->u.mb; 15578 15579 do { 15580 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23); 15581 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 15582 15583 if (rc != MBX_SUCCESS) { 15584 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 15585 "2601 failed to read config " 15586 "region 23, rc 0x%x Status 0x%x\n", 15587 rc, mb->mbxStatus); 15588 mb->un.varDmp.word_cnt = 0; 15589 } 15590 /* 15591 * dump mem may return a zero when finished or we got a 15592 * mailbox error, either way we are done. 15593 */ 15594 if (mb->un.varDmp.word_cnt == 0) 15595 break; 15596 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset) 15597 mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset; 15598 15599 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, 15600 rgn23_data + offset, 15601 mb->un.varDmp.word_cnt); 15602 offset += mb->un.varDmp.word_cnt; 15603 } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE); 15604 15605 mempool_free(pmb, phba->mbox_mem_pool); 15606 return offset; 15607 } 15608 15609 /** 15610 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data. 15611 * @phba: pointer to lpfc hba data structure. 15612 * @rgn23_data: pointer to configure region 23 data. 15613 * 15614 * This function gets SLI4 port configure region 23 data through memory dump 15615 * mailbox command. When it successfully retrieves data, the size of the data 15616 * will be returned, otherwise, 0 will be returned. 15617 **/ 15618 static uint32_t 15619 lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data) 15620 { 15621 LPFC_MBOXQ_t *mboxq = NULL; 15622 struct lpfc_dmabuf *mp = NULL; 15623 struct lpfc_mqe *mqe; 15624 uint32_t data_length = 0; 15625 int rc; 15626 15627 if (!rgn23_data) 15628 return 0; 15629 15630 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15631 if (!mboxq) { 15632 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15633 "3105 failed to allocate mailbox memory\n"); 15634 return 0; 15635 } 15636 15637 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) 15638 goto out; 15639 mqe = &mboxq->u.mqe; 15640 mp = (struct lpfc_dmabuf *) mboxq->context1; 15641 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 15642 if (rc) 15643 goto out; 15644 data_length = mqe->un.mb_words[5]; 15645 if (data_length == 0) 15646 goto out; 15647 if (data_length > DMP_RGN23_SIZE) { 15648 data_length = 0; 15649 goto out; 15650 } 15651 lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length); 15652 out: 15653 mempool_free(mboxq, phba->mbox_mem_pool); 15654 if (mp) { 15655 lpfc_mbuf_free(phba, mp->virt, mp->phys); 15656 kfree(mp); 15657 } 15658 return data_length; 15659 } 15660 15661 /** 15662 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled. 15663 * @phba: pointer to lpfc hba data structure. 15664 * 15665 * This function read region 23 and parse TLV for port status to 15666 * decide if the user disaled the port. If the TLV indicates the 15667 * port is disabled, the hba_flag is set accordingly. 
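 *
 * Illustrative check of the result (assumed caller context; the log text
 * is hypothetical):
 *
 *	lpfc_sli_read_link_ste(phba);
 *	if (phba->hba_flag & LINK_DISABLED)
 *		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
 *				"Port is administratively disabled\n");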
15668 **/ 15669 void 15670 lpfc_sli_read_link_ste(struct lpfc_hba *phba) 15671 { 15672 uint8_t *rgn23_data = NULL; 15673 uint32_t if_type, data_size, sub_tlv_len, tlv_offset; 15674 uint32_t offset = 0; 15675 15676 /* Get adapter Region 23 data */ 15677 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL); 15678 if (!rgn23_data) 15679 goto out; 15680 15681 if (phba->sli_rev < LPFC_SLI_REV4) 15682 data_size = lpfc_sli_get_config_region23(phba, rgn23_data); 15683 else { 15684 if_type = bf_get(lpfc_sli_intf_if_type, 15685 &phba->sli4_hba.sli_intf); 15686 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) 15687 goto out; 15688 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data); 15689 } 15690 15691 if (!data_size) 15692 goto out; 15693 15694 /* Check the region signature first */ 15695 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) { 15696 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15697 "2619 Config region 23 has bad signature\n"); 15698 goto out; 15699 } 15700 offset += 4; 15701 15702 /* Check the data structure version */ 15703 if (rgn23_data[offset] != LPFC_REGION23_VERSION) { 15704 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15705 "2620 Config region 23 has bad version\n"); 15706 goto out; 15707 } 15708 offset += 4; 15709 15710 /* Parse TLV entries in the region */ 15711 while (offset < data_size) { 15712 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) 15713 break; 15714 /* 15715 * If the TLV is not driver specific TLV or driver id is 15716 * not linux driver id, skip the record. 15717 */ 15718 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) || 15719 (rgn23_data[offset + 2] != LINUX_DRIVER_ID) || 15720 (rgn23_data[offset + 3] != 0)) { 15721 offset += rgn23_data[offset + 1] * 4 + 4; 15722 continue; 15723 } 15724 15725 /* Driver found a driver specific TLV in the config region */ 15726 sub_tlv_len = rgn23_data[offset + 1] * 4; 15727 offset += 4; 15728 tlv_offset = 0; 15729 15730 /* 15731 * Search for configured port state sub-TLV. 15732 */ 15733 while ((offset < data_size) && 15734 (tlv_offset < sub_tlv_len)) { 15735 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) { 15736 offset += 4; 15737 tlv_offset += 4; 15738 break; 15739 } 15740 if (rgn23_data[offset] != PORT_STE_TYPE) { 15741 offset += rgn23_data[offset + 1] * 4 + 4; 15742 tlv_offset += rgn23_data[offset + 1] * 4 + 4; 15743 continue; 15744 } 15745 15746 /* This HBA contains PORT_STE configured */ 15747 if (!rgn23_data[offset + 2]) 15748 phba->hba_flag |= LINK_DISABLED; 15749 15750 goto out; 15751 } 15752 } 15753 15754 out: 15755 kfree(rgn23_data); 15756 return; 15757 } 15758 15759 /** 15760 * lpfc_wr_object - write an object to the firmware 15761 * @phba: HBA structure that indicates port to create a queue on. 15762 * @dmabuf_list: list of dmabufs to write to the port. 15763 * @size: the total byte value of the objects to write to the port. 15764 * @offset: the current offset to be used to start the transfer. 15765 * 15766 * This routine will create a wr_object mailbox command to send to the port. 15767 * the mailbox command will be constructed using the dma buffers described in 15768 * @dmabuf_list to create a list of BDEs. This routine will fill in as many 15769 * BDEs that the imbedded mailbox can support. The @offset variable will be 15770 * used to indicate the starting offset of the transfer and will also return 15771 * the offset after the write object mailbox has completed. @size is used to 15772 * determine the end of the object and whether the eof bit should be set. 
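 *
 * Illustrative caller sketch (assumed context; fw_size and dmabuf_list are
 * hypothetical caller-owned names): the object is written chunk by chunk,
 * each successful call advancing @offset for the next transfer, e.g.
 *
 *	rc = lpfc_wr_object(phba, &dmabuf_list, fw_size, &offset);
 *	if (rc)
 *		return rc;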
15773 *
15774 * Return 0 if successful; @offset will contain the new offset to use
15775 * for the next write.
15776 * Return negative value for error cases.
15777 **/
15778 int
15779 lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
15780 	       uint32_t size, uint32_t *offset)
15781 {
15782 	struct lpfc_mbx_wr_object *wr_object;
15783 	LPFC_MBOXQ_t *mbox;
15784 	int rc = 0, i = 0;
15785 	uint32_t shdr_status, shdr_add_status;
15786 	uint32_t mbox_tmo;
15787 	union lpfc_sli4_cfg_shdr *shdr;
15788 	struct lpfc_dmabuf *dmabuf;
15789 	uint32_t written = 0;
15790
15791 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15792 	if (!mbox)
15793 		return -ENOMEM;
15794
15795 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15796 			LPFC_MBOX_OPCODE_WRITE_OBJECT,
15797 			sizeof(struct lpfc_mbx_wr_object) -
15798 			sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
15799
15800 	wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
15801 	wr_object->u.request.write_offset = *offset;
15802 	sprintf((uint8_t *)wr_object->u.request.object_name, "/");
15803 	wr_object->u.request.object_name[0] =
15804 		cpu_to_le32(wr_object->u.request.object_name[0]);
15805 	bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
15806 	list_for_each_entry(dmabuf, dmabuf_list, list) {
15807 		if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
15808 			break;
15809 		wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
15810 		wr_object->u.request.bde[i].addrHigh =
15811 			putPaddrHigh(dmabuf->phys);
15812 		if (written + SLI4_PAGE_SIZE >= size) {
15813 			wr_object->u.request.bde[i].tus.f.bdeSize =
15814 				(size - written);
15815 			written += (size - written);
15816 			bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
15817 		} else {
15818 			wr_object->u.request.bde[i].tus.f.bdeSize =
15819 				SLI4_PAGE_SIZE;
15820 			written += SLI4_PAGE_SIZE;
15821 		}
15822 		i++;
15823 	}
15824 	wr_object->u.request.bde_count = i;
15825 	bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
15826 	if (!phba->sli4_hba.intr_enable)
15827 		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15828 	else {
15829 		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
15830 		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
15831 	}
15832 	/* The IOCTL status is embedded in the mailbox subheader. */
15833 	shdr = (union lpfc_sli4_cfg_shdr *) &wr_object->header.cfg_shdr;
15834 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15835 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15836 	if (rc != MBX_TIMEOUT)
15837 		mempool_free(mbox, phba->mbox_mem_pool);
15838 	if (shdr_status || shdr_add_status || rc) {
15839 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15840 				"3025 Write Object mailbox failed with "
15841 				"status x%x add_status x%x, mbx status x%x\n",
15842 				shdr_status, shdr_add_status, rc);
15843 		rc = -ENXIO;
15844 	} else
15845 		*offset += wr_object->u.response.actual_write_length;
15846 	return rc;
15847 }
15848
15849 /**
15850 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
15851 * @vport: pointer to vport data structure.
15852 *
15853 * This function iterates through the mailboxq and cleans up all REG_LOGIN
15854 * and REG_VPI mailbox commands associated with the vport. This function
15855 * is called when the driver wants to restart discovery of the vport due to
15856 * a Clear Virtual Link event.
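 *
 * Illustrative sketch (assumed caller context): a Clear Virtual Link
 * handler would flush the stale discovery mailboxes for the vport before
 * restarting discovery, e.g.
 *
 *	lpfc_cleanup_pending_mbox(vport);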
15857 **/ 15858 void 15859 lpfc_cleanup_pending_mbox(struct lpfc_vport *vport) 15860 { 15861 struct lpfc_hba *phba = vport->phba; 15862 LPFC_MBOXQ_t *mb, *nextmb; 15863 struct lpfc_dmabuf *mp; 15864 struct lpfc_nodelist *ndlp; 15865 struct lpfc_nodelist *act_mbx_ndlp = NULL; 15866 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 15867 LIST_HEAD(mbox_cmd_list); 15868 uint8_t restart_loop; 15869 15870 /* Clean up internally queued mailbox commands with the vport */ 15871 spin_lock_irq(&phba->hbalock); 15872 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { 15873 if (mb->vport != vport) 15874 continue; 15875 15876 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) && 15877 (mb->u.mb.mbxCommand != MBX_REG_VPI)) 15878 continue; 15879 15880 list_del(&mb->list); 15881 list_add_tail(&mb->list, &mbox_cmd_list); 15882 } 15883 /* Clean up active mailbox command with the vport */ 15884 mb = phba->sli.mbox_active; 15885 if (mb && (mb->vport == vport)) { 15886 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) || 15887 (mb->u.mb.mbxCommand == MBX_REG_VPI)) 15888 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 15889 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 15890 act_mbx_ndlp = (struct lpfc_nodelist *)mb->context2; 15891 /* Put reference count for delayed processing */ 15892 act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp); 15893 /* Unregister the RPI when mailbox complete */ 15894 mb->mbox_flag |= LPFC_MBX_IMED_UNREG; 15895 } 15896 } 15897 /* Cleanup any mailbox completions which are not yet processed */ 15898 do { 15899 restart_loop = 0; 15900 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) { 15901 /* 15902 * If this mailox is already processed or it is 15903 * for another vport ignore it. 15904 */ 15905 if ((mb->vport != vport) || 15906 (mb->mbox_flag & LPFC_MBX_IMED_UNREG)) 15907 continue; 15908 15909 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) && 15910 (mb->u.mb.mbxCommand != MBX_REG_VPI)) 15911 continue; 15912 15913 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 15914 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 15915 ndlp = (struct lpfc_nodelist *)mb->context2; 15916 /* Unregister the RPI when mailbox complete */ 15917 mb->mbox_flag |= LPFC_MBX_IMED_UNREG; 15918 restart_loop = 1; 15919 spin_unlock_irq(&phba->hbalock); 15920 spin_lock(shost->host_lock); 15921 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; 15922 spin_unlock(shost->host_lock); 15923 spin_lock_irq(&phba->hbalock); 15924 break; 15925 } 15926 } 15927 } while (restart_loop); 15928 15929 spin_unlock_irq(&phba->hbalock); 15930 15931 /* Release the cleaned-up mailbox commands */ 15932 while (!list_empty(&mbox_cmd_list)) { 15933 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list); 15934 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 15935 mp = (struct lpfc_dmabuf *) (mb->context1); 15936 if (mp) { 15937 __lpfc_mbuf_free(phba, mp->virt, mp->phys); 15938 kfree(mp); 15939 } 15940 ndlp = (struct lpfc_nodelist *) mb->context2; 15941 mb->context2 = NULL; 15942 if (ndlp) { 15943 spin_lock(shost->host_lock); 15944 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; 15945 spin_unlock(shost->host_lock); 15946 lpfc_nlp_put(ndlp); 15947 } 15948 } 15949 mempool_free(mb, phba->mbox_mem_pool); 15950 } 15951 15952 /* Release the ndlp with the cleaned-up active mailbox command */ 15953 if (act_mbx_ndlp) { 15954 spin_lock(shost->host_lock); 15955 act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; 15956 spin_unlock(shost->host_lock); 15957 lpfc_nlp_put(act_mbx_ndlp); 15958 } 15959 } 15960 15961 /** 15962 * lpfc_drain_txq - Drain the txq 15963 * @phba: Pointer to HBA context object. 
15964 *
15965 * This function attempts to submit IOCBs on the txq
15966 * to the adapter. For SLI4 adapters, the txq contains
15967 * ELS IOCBs that have been deferred because there
15968 * are no SGLs. This congestion can occur with large
15969 * vport counts during node discovery.
15970 **/
15971
15972 uint32_t
15973 lpfc_drain_txq(struct lpfc_hba *phba)
15974 {
15975 	LIST_HEAD(completions);
15976 	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
15977 	struct lpfc_iocbq *piocbq = NULL;
15978 	unsigned long iflags = 0;
15979 	char *fail_msg = NULL;
15980 	struct lpfc_sglq *sglq;
15981 	union lpfc_wqe wqe;
15982
15983 	spin_lock_irqsave(&phba->hbalock, iflags);
15984 	if (pring->txq_cnt > pring->txq_max)
15985 		pring->txq_max = pring->txq_cnt;
15986
15987 	spin_unlock_irqrestore(&phba->hbalock, iflags);
15988
15989 	while (pring->txq_cnt) {
15990 		spin_lock_irqsave(&phba->hbalock, iflags);
15991
15992 		piocbq = lpfc_sli_ringtx_get(phba, pring);
15993 		sglq = __lpfc_sli_get_sglq(phba, piocbq);
15994 		if (!sglq) {
15995 			__lpfc_sli_ringtx_put(phba, pring, piocbq);
15996 			spin_unlock_irqrestore(&phba->hbalock, iflags);
15997 			break;
15998 		} else {
15999 			if (!piocbq) {
16000 				/* The txq_cnt is out of sync. This should
16001 				 * never happen.
16002 				 */
16003 				sglq = __lpfc_clear_active_sglq(phba,
16004 						 sglq->sli4_lxritag);
16005 				spin_unlock_irqrestore(&phba->hbalock, iflags);
16006 				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16007 					"2823 txq empty and txq_cnt is %d\n ",
16008 					pring->txq_cnt);
16009 				break;
16010 			}
16011 		}
16012
16013 		/* The xri and iocb resources are secured,
16014 		 * attempt to issue the request.
16015 		 */
16016 		piocbq->sli4_lxritag = sglq->sli4_lxritag;
16017 		piocbq->sli4_xritag = sglq->sli4_xritag;
16018 		if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
16019 			fail_msg = "to convert bpl to sgl";
16020 		else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
16021 			fail_msg = "to convert iocb to wqe";
16022 		else if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
16023 			fail_msg = " - Wq is full";
16024 		else
16025 			lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
16026
16027 		if (fail_msg) {
16028 			/* Failed means we can't issue and need to cancel */
16029 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16030 					"2822 IOCB failed %s iotag 0x%x "
16031 					"xri 0x%x\n",
16032 					fail_msg,
16033 					piocbq->iotag, piocbq->sli4_xritag);
16034 			list_add_tail(&piocbq->list, &completions);
16035 		}
16036 		spin_unlock_irqrestore(&phba->hbalock, iflags);
16037 	}
16038
16039 	/* Cancel all the IOCBs that cannot be issued */
16040 	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
16041 				IOERR_SLI_ABORTED);
16042
16043 	return pring->txq_cnt;
16044 }
16045
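/*
 * Usage sketch (illustrative only; assumed caller context): the FCF scan,
 * roundrobin read and rediscover routines above all share one asynchronous
 * mailbox pattern - allocate from mbox_mem_pool, build the command, attach
 * a completion handler, issue with MBX_NOWAIT, and free the mailbox only
 * when the submit itself fails:
 *
 *	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	if (!mboxq)
 *		return -ENOMEM;
 *	lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
 *	mboxq->vport = phba->pport;
 *	mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
 *	if (lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT) == MBX_NOT_FINISHED) {
 *		lpfc_sli4_mbox_cmd_free(phba, mboxq);
 *		return -EIO;
 *	}
 *	return 0;
 */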