1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2004-2011 Emulex. All rights reserved. * 5 * EMULEX and SLI are trademarks of Emulex. * 6 * www.emulex.com * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 8 * * 9 * This program is free software; you can redistribute it and/or * 10 * modify it under the terms of version 2 of the GNU General * 11 * Public License as published by the Free Software Foundation. * 12 * This program is distributed in the hope that it will be useful. * 13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 17 * TO BE LEGALLY INVALID. See the GNU General Public License for * 18 * more details, a copy of which can be found in the file COPYING * 19 * included with this package. * 20 *******************************************************************/ 21 22 #include <linux/blkdev.h> 23 #include <linux/pci.h> 24 #include <linux/interrupt.h> 25 #include <linux/delay.h> 26 #include <linux/slab.h> 27 28 #include <scsi/scsi.h> 29 #include <scsi/scsi_cmnd.h> 30 #include <scsi/scsi_device.h> 31 #include <scsi/scsi_host.h> 32 #include <scsi/scsi_transport_fc.h> 33 #include <scsi/fc/fc_fs.h> 34 #include <linux/aer.h> 35 36 #include "lpfc_hw4.h" 37 #include "lpfc_hw.h" 38 #include "lpfc_sli.h" 39 #include "lpfc_sli4.h" 40 #include "lpfc_nl.h" 41 #include "lpfc_disc.h" 42 #include "lpfc_scsi.h" 43 #include "lpfc.h" 44 #include "lpfc_crtn.h" 45 #include "lpfc_logmsg.h" 46 #include "lpfc_compat.h" 47 #include "lpfc_debugfs.h" 48 #include "lpfc_vport.h" 49 50 /* There are only four IOCB completion types. */ 51 typedef enum _lpfc_iocb_type { 52 LPFC_UNKNOWN_IOCB, 53 LPFC_UNSOL_IOCB, 54 LPFC_SOL_IOCB, 55 LPFC_ABORT_IOCB 56 } lpfc_iocb_type; 57 58 59 /* Provide function prototypes local to this module. */ 60 static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *, 61 uint32_t); 62 static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *, 63 uint8_t *, uint32_t *); 64 static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *, 65 struct lpfc_iocbq *); 66 static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *, 67 struct hbq_dmabuf *); 68 static int lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *, struct lpfc_queue *, 69 struct lpfc_cqe *); 70 71 static IOCB_t * 72 lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq) 73 { 74 return &iocbq->iocb; 75 } 76 77 /** 78 * lpfc_sli4_wq_put - Put a Work Queue Entry on an Work Queue 79 * @q: The Work Queue to operate on. 80 * @wqe: The work Queue Entry to put on the Work queue. 81 * 82 * This routine will copy the contents of @wqe to the next available entry on 83 * the @q. This function will then ring the Work Queue Doorbell to signal the 84 * HBA to start processing the Work Queue Entry. This function returns 0 if 85 * successful. If no entries are available on @q then this function will return 86 * -ENOMEM. 87 * The caller is expected to hold the hbalock when calling this routine. 88 **/ 89 static uint32_t 90 lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe) 91 { 92 union lpfc_wqe *temp_wqe = q->qe[q->host_index].wqe; 93 struct lpfc_register doorbell; 94 uint32_t host_index; 95 96 /* If the host has not yet processed the next entry then we are done */ 97 if (((q->host_index + 1) % q->entry_count) == q->hba_index) 98 return -ENOMEM; 99 /* set consumption flag every once in a while */ 100 if (!((q->host_index + 1) % LPFC_RELEASE_NOTIFICATION_INTERVAL)) 101 bf_set(wqe_wqec, &wqe->generic.wqe_com, 1); 102 if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED) 103 bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id); 104 lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size); 105 106 /* Update the host index before invoking device */ 107 host_index = q->host_index; 108 q->host_index = ((q->host_index + 1) % q->entry_count); 109 110 /* Ring Doorbell */ 111 doorbell.word0 = 0; 112 bf_set(lpfc_wq_doorbell_num_posted, &doorbell, 1); 113 bf_set(lpfc_wq_doorbell_index, &doorbell, host_index); 114 bf_set(lpfc_wq_doorbell_id, &doorbell, q->queue_id); 115 writel(doorbell.word0, q->phba->sli4_hba.WQDBregaddr); 116 readl(q->phba->sli4_hba.WQDBregaddr); /* Flush */ 117 118 return 0; 119 } 120 121 /** 122 * lpfc_sli4_wq_release - Updates internal hba index for WQ 123 * @q: The Work Queue to operate on. 124 * @index: The index to advance the hba index to. 125 * 126 * This routine will update the HBA index of a queue to reflect consumption of 127 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed 128 * an entry the host calls this function to update the queue's internal 129 * pointers. This routine returns the number of entries that were consumed by 130 * the HBA. 131 **/ 132 static uint32_t 133 lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index) 134 { 135 uint32_t released = 0; 136 137 if (q->hba_index == index) 138 return 0; 139 do { 140 q->hba_index = ((q->hba_index + 1) % q->entry_count); 141 released++; 142 } while (q->hba_index != index); 143 return released; 144 } 145 146 /** 147 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on an Mailbox Queue 148 * @q: The Mailbox Queue to operate on. 149 * @wqe: The Mailbox Queue Entry to put on the Work queue. 150 * 151 * This routine will copy the contents of @mqe to the next available entry on 152 * the @q. This function will then ring the Work Queue Doorbell to signal the 153 * HBA to start processing the Work Queue Entry. This function returns 0 if 154 * successful. If no entries are available on @q then this function will return 155 * -ENOMEM. 156 * The caller is expected to hold the hbalock when calling this routine. 157 **/ 158 static uint32_t 159 lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe) 160 { 161 struct lpfc_mqe *temp_mqe = q->qe[q->host_index].mqe; 162 struct lpfc_register doorbell; 163 uint32_t host_index; 164 165 /* If the host has not yet processed the next entry then we are done */ 166 if (((q->host_index + 1) % q->entry_count) == q->hba_index) 167 return -ENOMEM; 168 lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size); 169 /* Save off the mailbox pointer for completion */ 170 q->phba->mbox = (MAILBOX_t *)temp_mqe; 171 172 /* Update the host index before invoking device */ 173 host_index = q->host_index; 174 q->host_index = ((q->host_index + 1) % q->entry_count); 175 176 /* Ring Doorbell */ 177 doorbell.word0 = 0; 178 bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1); 179 bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id); 180 writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr); 181 readl(q->phba->sli4_hba.MQDBregaddr); /* Flush */ 182 return 0; 183 } 184 185 /** 186 * lpfc_sli4_mq_release - Updates internal hba index for MQ 187 * @q: The Mailbox Queue to operate on. 188 * 189 * This routine will update the HBA index of a queue to reflect consumption of 190 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed 191 * an entry the host calls this function to update the queue's internal 192 * pointers. This routine returns the number of entries that were consumed by 193 * the HBA. 194 **/ 195 static uint32_t 196 lpfc_sli4_mq_release(struct lpfc_queue *q) 197 { 198 /* Clear the mailbox pointer for completion */ 199 q->phba->mbox = NULL; 200 q->hba_index = ((q->hba_index + 1) % q->entry_count); 201 return 1; 202 } 203 204 /** 205 * lpfc_sli4_eq_get - Gets the next valid EQE from a EQ 206 * @q: The Event Queue to get the first valid EQE from 207 * 208 * This routine will get the first valid Event Queue Entry from @q, update 209 * the queue's internal hba index, and return the EQE. If no valid EQEs are in 210 * the Queue (no more work to do), or the Queue is full of EQEs that have been 211 * processed, but not popped back to the HBA then this routine will return NULL. 212 **/ 213 static struct lpfc_eqe * 214 lpfc_sli4_eq_get(struct lpfc_queue *q) 215 { 216 struct lpfc_eqe *eqe = q->qe[q->hba_index].eqe; 217 218 /* If the next EQE is not valid then we are done */ 219 if (!bf_get_le32(lpfc_eqe_valid, eqe)) 220 return NULL; 221 /* If the host has not yet processed the next entry then we are done */ 222 if (((q->hba_index + 1) % q->entry_count) == q->host_index) 223 return NULL; 224 225 q->hba_index = ((q->hba_index + 1) % q->entry_count); 226 return eqe; 227 } 228 229 /** 230 * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ 231 * @q: The Event Queue that the host has completed processing for. 232 * @arm: Indicates whether the host wants to arms this CQ. 233 * 234 * This routine will mark all Event Queue Entries on @q, from the last 235 * known completed entry to the last entry that was processed, as completed 236 * by clearing the valid bit for each completion queue entry. Then it will 237 * notify the HBA, by ringing the doorbell, that the EQEs have been processed. 238 * The internal host index in the @q will be updated by this routine to indicate 239 * that the host has finished processing the entries. The @arm parameter 240 * indicates that the queue should be rearmed when ringing the doorbell. 241 * 242 * This function will return the number of EQEs that were popped. 243 **/ 244 uint32_t 245 lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm) 246 { 247 uint32_t released = 0; 248 struct lpfc_eqe *temp_eqe; 249 struct lpfc_register doorbell; 250 251 /* while there are valid entries */ 252 while (q->hba_index != q->host_index) { 253 temp_eqe = q->qe[q->host_index].eqe; 254 bf_set_le32(lpfc_eqe_valid, temp_eqe, 0); 255 released++; 256 q->host_index = ((q->host_index + 1) % q->entry_count); 257 } 258 if (unlikely(released == 0 && !arm)) 259 return 0; 260 261 /* ring doorbell for number popped */ 262 doorbell.word0 = 0; 263 if (arm) { 264 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1); 265 bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1); 266 } 267 bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released); 268 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT); 269 bf_set(lpfc_eqcq_doorbell_eqid, &doorbell, q->queue_id); 270 writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr); 271 /* PCI read to flush PCI pipeline on re-arming for INTx mode */ 272 if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM)) 273 readl(q->phba->sli4_hba.EQCQDBregaddr); 274 return released; 275 } 276 277 /** 278 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ 279 * @q: The Completion Queue to get the first valid CQE from 280 * 281 * This routine will get the first valid Completion Queue Entry from @q, update 282 * the queue's internal hba index, and return the CQE. If no valid CQEs are in 283 * the Queue (no more work to do), or the Queue is full of CQEs that have been 284 * processed, but not popped back to the HBA then this routine will return NULL. 285 **/ 286 static struct lpfc_cqe * 287 lpfc_sli4_cq_get(struct lpfc_queue *q) 288 { 289 struct lpfc_cqe *cqe; 290 291 /* If the next CQE is not valid then we are done */ 292 if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe)) 293 return NULL; 294 /* If the host has not yet processed the next entry then we are done */ 295 if (((q->hba_index + 1) % q->entry_count) == q->host_index) 296 return NULL; 297 298 cqe = q->qe[q->hba_index].cqe; 299 q->hba_index = ((q->hba_index + 1) % q->entry_count); 300 return cqe; 301 } 302 303 /** 304 * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ 305 * @q: The Completion Queue that the host has completed processing for. 306 * @arm: Indicates whether the host wants to arms this CQ. 307 * 308 * This routine will mark all Completion queue entries on @q, from the last 309 * known completed entry to the last entry that was processed, as completed 310 * by clearing the valid bit for each completion queue entry. Then it will 311 * notify the HBA, by ringing the doorbell, that the CQEs have been processed. 312 * The internal host index in the @q will be updated by this routine to indicate 313 * that the host has finished processing the entries. The @arm parameter 314 * indicates that the queue should be rearmed when ringing the doorbell. 315 * 316 * This function will return the number of CQEs that were released. 317 **/ 318 uint32_t 319 lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm) 320 { 321 uint32_t released = 0; 322 struct lpfc_cqe *temp_qe; 323 struct lpfc_register doorbell; 324 325 /* while there are valid entries */ 326 while (q->hba_index != q->host_index) { 327 temp_qe = q->qe[q->host_index].cqe; 328 bf_set_le32(lpfc_cqe_valid, temp_qe, 0); 329 released++; 330 q->host_index = ((q->host_index + 1) % q->entry_count); 331 } 332 if (unlikely(released == 0 && !arm)) 333 return 0; 334 335 /* ring doorbell for number popped */ 336 doorbell.word0 = 0; 337 if (arm) 338 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1); 339 bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released); 340 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION); 341 bf_set(lpfc_eqcq_doorbell_cqid, &doorbell, q->queue_id); 342 writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr); 343 return released; 344 } 345 346 /** 347 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue 348 * @q: The Header Receive Queue to operate on. 349 * @wqe: The Receive Queue Entry to put on the Receive queue. 350 * 351 * This routine will copy the contents of @wqe to the next available entry on 352 * the @q. This function will then ring the Receive Queue Doorbell to signal the 353 * HBA to start processing the Receive Queue Entry. This function returns the 354 * index that the rqe was copied to if successful. If no entries are available 355 * on @q then this function will return -ENOMEM. 356 * The caller is expected to hold the hbalock when calling this routine. 357 **/ 358 static int 359 lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq, 360 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe) 361 { 362 struct lpfc_rqe *temp_hrqe = hq->qe[hq->host_index].rqe; 363 struct lpfc_rqe *temp_drqe = dq->qe[dq->host_index].rqe; 364 struct lpfc_register doorbell; 365 int put_index = hq->host_index; 366 367 if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ) 368 return -EINVAL; 369 if (hq->host_index != dq->host_index) 370 return -EINVAL; 371 /* If the host has not yet processed the next entry then we are done */ 372 if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index) 373 return -EBUSY; 374 lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size); 375 lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size); 376 377 /* Update the host index to point to the next slot */ 378 hq->host_index = ((hq->host_index + 1) % hq->entry_count); 379 dq->host_index = ((dq->host_index + 1) % dq->entry_count); 380 381 /* Ring The Header Receive Queue Doorbell */ 382 if (!(hq->host_index % LPFC_RQ_POST_BATCH)) { 383 doorbell.word0 = 0; 384 bf_set(lpfc_rq_doorbell_num_posted, &doorbell, 385 LPFC_RQ_POST_BATCH); 386 bf_set(lpfc_rq_doorbell_id, &doorbell, hq->queue_id); 387 writel(doorbell.word0, hq->phba->sli4_hba.RQDBregaddr); 388 } 389 return put_index; 390 } 391 392 /** 393 * lpfc_sli4_rq_release - Updates internal hba index for RQ 394 * @q: The Header Receive Queue to operate on. 395 * 396 * This routine will update the HBA index of a queue to reflect consumption of 397 * one Receive Queue Entry by the HBA. When the HBA indicates that it has 398 * consumed an entry the host calls this function to update the queue's 399 * internal pointers. This routine returns the number of entries that were 400 * consumed by the HBA. 401 **/ 402 static uint32_t 403 lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq) 404 { 405 if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ)) 406 return 0; 407 hq->hba_index = ((hq->hba_index + 1) % hq->entry_count); 408 dq->hba_index = ((dq->hba_index + 1) % dq->entry_count); 409 return 1; 410 } 411 412 /** 413 * lpfc_cmd_iocb - Get next command iocb entry in the ring 414 * @phba: Pointer to HBA context object. 415 * @pring: Pointer to driver SLI ring object. 416 * 417 * This function returns pointer to next command iocb entry 418 * in the command ring. The caller must hold hbalock to prevent 419 * other threads consume the next command iocb. 420 * SLI-2/SLI-3 provide different sized iocbs. 421 **/ 422 static inline IOCB_t * 423 lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 424 { 425 return (IOCB_t *) (((char *) pring->cmdringaddr) + 426 pring->cmdidx * phba->iocb_cmd_size); 427 } 428 429 /** 430 * lpfc_resp_iocb - Get next response iocb entry in the ring 431 * @phba: Pointer to HBA context object. 432 * @pring: Pointer to driver SLI ring object. 433 * 434 * This function returns pointer to next response iocb entry 435 * in the response ring. The caller must hold hbalock to make sure 436 * that no other thread consume the next response iocb. 437 * SLI-2/SLI-3 provide different sized iocbs. 438 **/ 439 static inline IOCB_t * 440 lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 441 { 442 return (IOCB_t *) (((char *) pring->rspringaddr) + 443 pring->rspidx * phba->iocb_rsp_size); 444 } 445 446 /** 447 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool 448 * @phba: Pointer to HBA context object. 449 * 450 * This function is called with hbalock held. This function 451 * allocates a new driver iocb object from the iocb pool. If the 452 * allocation is successful, it returns pointer to the newly 453 * allocated iocb object else it returns NULL. 454 **/ 455 static struct lpfc_iocbq * 456 __lpfc_sli_get_iocbq(struct lpfc_hba *phba) 457 { 458 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list; 459 struct lpfc_iocbq * iocbq = NULL; 460 461 list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list); 462 if (iocbq) 463 phba->iocb_cnt++; 464 if (phba->iocb_cnt > phba->iocb_max) 465 phba->iocb_max = phba->iocb_cnt; 466 return iocbq; 467 } 468 469 /** 470 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI. 471 * @phba: Pointer to HBA context object. 472 * @xritag: XRI value. 473 * 474 * This function clears the sglq pointer from the array of acive 475 * sglq's. The xritag that is passed in is used to index into the 476 * array. Before the xritag can be used it needs to be adjusted 477 * by subtracting the xribase. 478 * 479 * Returns sglq ponter = success, NULL = Failure. 480 **/ 481 static struct lpfc_sglq * 482 __lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag) 483 { 484 struct lpfc_sglq *sglq; 485 486 sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag]; 487 phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL; 488 return sglq; 489 } 490 491 /** 492 * __lpfc_get_active_sglq - Get the active sglq for this XRI. 493 * @phba: Pointer to HBA context object. 494 * @xritag: XRI value. 495 * 496 * This function returns the sglq pointer from the array of acive 497 * sglq's. The xritag that is passed in is used to index into the 498 * array. Before the xritag can be used it needs to be adjusted 499 * by subtracting the xribase. 500 * 501 * Returns sglq ponter = success, NULL = Failure. 502 **/ 503 struct lpfc_sglq * 504 __lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag) 505 { 506 struct lpfc_sglq *sglq; 507 508 sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag]; 509 return sglq; 510 } 511 512 /** 513 * __lpfc_set_rrq_active - set RRQ active bit in the ndlp's xri_bitmap. 514 * @phba: Pointer to HBA context object. 515 * @ndlp: nodelist pointer for this target. 516 * @xritag: xri used in this exchange. 517 * @rxid: Remote Exchange ID. 518 * @send_rrq: Flag used to determine if we should send rrq els cmd. 519 * 520 * This function is called with hbalock held. 521 * The active bit is set in the ndlp's active rrq xri_bitmap. Allocates an 522 * rrq struct and adds it to the active_rrq_list. 523 * 524 * returns 0 for rrq slot for this xri 525 * < 0 Were not able to get rrq mem or invalid parameter. 526 **/ 527 static int 528 __lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, 529 uint16_t xritag, uint16_t rxid, uint16_t send_rrq) 530 { 531 struct lpfc_node_rrq *rrq; 532 int empty; 533 uint32_t did = 0; 534 535 536 if (!ndlp) 537 return -EINVAL; 538 539 if (!phba->cfg_enable_rrq) 540 return -EINVAL; 541 542 if (phba->pport->load_flag & FC_UNLOADING) { 543 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 544 goto out; 545 } 546 did = ndlp->nlp_DID; 547 548 /* 549 * set the active bit even if there is no mem available. 550 */ 551 if (NLP_CHK_FREE_REQ(ndlp)) 552 goto out; 553 554 if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING)) 555 goto out; 556 557 if (test_and_set_bit(xritag, ndlp->active_rrqs.xri_bitmap)) 558 goto out; 559 560 rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL); 561 if (rrq) { 562 rrq->send_rrq = send_rrq; 563 rrq->xritag = xritag; 564 rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1); 565 rrq->ndlp = ndlp; 566 rrq->nlp_DID = ndlp->nlp_DID; 567 rrq->vport = ndlp->vport; 568 rrq->rxid = rxid; 569 empty = list_empty(&phba->active_rrq_list); 570 rrq->send_rrq = send_rrq; 571 list_add_tail(&rrq->list, &phba->active_rrq_list); 572 if (!(phba->hba_flag & HBA_RRQ_ACTIVE)) { 573 phba->hba_flag |= HBA_RRQ_ACTIVE; 574 if (empty) 575 lpfc_worker_wake_up(phba); 576 } 577 return 0; 578 } 579 out: 580 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 581 "2921 Can't set rrq active xri:0x%x rxid:0x%x" 582 " DID:0x%x Send:%d\n", 583 xritag, rxid, did, send_rrq); 584 return -EINVAL; 585 } 586 587 /** 588 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap. 589 * @phba: Pointer to HBA context object. 590 * @xritag: xri used in this exchange. 591 * @rrq: The RRQ to be cleared. 592 * 593 **/ 594 void 595 lpfc_clr_rrq_active(struct lpfc_hba *phba, 596 uint16_t xritag, 597 struct lpfc_node_rrq *rrq) 598 { 599 struct lpfc_nodelist *ndlp = NULL; 600 601 if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp)) 602 ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID); 603 604 /* The target DID could have been swapped (cable swap) 605 * we should use the ndlp from the findnode if it is 606 * available. 607 */ 608 if ((!ndlp) && rrq->ndlp) 609 ndlp = rrq->ndlp; 610 611 if (!ndlp) 612 goto out; 613 614 if (test_and_clear_bit(xritag, ndlp->active_rrqs.xri_bitmap)) { 615 rrq->send_rrq = 0; 616 rrq->xritag = 0; 617 rrq->rrq_stop_time = 0; 618 } 619 out: 620 mempool_free(rrq, phba->rrq_pool); 621 } 622 623 /** 624 * lpfc_handle_rrq_active - Checks if RRQ has waithed RATOV. 625 * @phba: Pointer to HBA context object. 626 * 627 * This function is called with hbalock held. This function 628 * Checks if stop_time (ratov from setting rrq active) has 629 * been reached, if it has and the send_rrq flag is set then 630 * it will call lpfc_send_rrq. If the send_rrq flag is not set 631 * then it will just call the routine to clear the rrq and 632 * free the rrq resource. 633 * The timer is set to the next rrq that is going to expire before 634 * leaving the routine. 635 * 636 **/ 637 void 638 lpfc_handle_rrq_active(struct lpfc_hba *phba) 639 { 640 struct lpfc_node_rrq *rrq; 641 struct lpfc_node_rrq *nextrrq; 642 unsigned long next_time; 643 unsigned long iflags; 644 LIST_HEAD(send_rrq); 645 646 spin_lock_irqsave(&phba->hbalock, iflags); 647 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 648 next_time = jiffies + HZ * (phba->fc_ratov + 1); 649 list_for_each_entry_safe(rrq, nextrrq, 650 &phba->active_rrq_list, list) { 651 if (time_after(jiffies, rrq->rrq_stop_time)) 652 list_move(&rrq->list, &send_rrq); 653 else if (time_before(rrq->rrq_stop_time, next_time)) 654 next_time = rrq->rrq_stop_time; 655 } 656 spin_unlock_irqrestore(&phba->hbalock, iflags); 657 if (!list_empty(&phba->active_rrq_list)) 658 mod_timer(&phba->rrq_tmr, next_time); 659 list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) { 660 list_del(&rrq->list); 661 if (!rrq->send_rrq) 662 /* this call will free the rrq */ 663 lpfc_clr_rrq_active(phba, rrq->xritag, rrq); 664 else if (lpfc_send_rrq(phba, rrq)) { 665 /* if we send the rrq then the completion handler 666 * will clear the bit in the xribitmap. 667 */ 668 lpfc_clr_rrq_active(phba, rrq->xritag, 669 rrq); 670 } 671 } 672 } 673 674 /** 675 * lpfc_get_active_rrq - Get the active RRQ for this exchange. 676 * @vport: Pointer to vport context object. 677 * @xri: The xri used in the exchange. 678 * @did: The targets DID for this exchange. 679 * 680 * returns NULL = rrq not found in the phba->active_rrq_list. 681 * rrq = rrq for this xri and target. 682 **/ 683 struct lpfc_node_rrq * 684 lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did) 685 { 686 struct lpfc_hba *phba = vport->phba; 687 struct lpfc_node_rrq *rrq; 688 struct lpfc_node_rrq *nextrrq; 689 unsigned long iflags; 690 691 if (phba->sli_rev != LPFC_SLI_REV4) 692 return NULL; 693 spin_lock_irqsave(&phba->hbalock, iflags); 694 list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) { 695 if (rrq->vport == vport && rrq->xritag == xri && 696 rrq->nlp_DID == did){ 697 list_del(&rrq->list); 698 spin_unlock_irqrestore(&phba->hbalock, iflags); 699 return rrq; 700 } 701 } 702 spin_unlock_irqrestore(&phba->hbalock, iflags); 703 return NULL; 704 } 705 706 /** 707 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport. 708 * @vport: Pointer to vport context object. 709 * @ndlp: Pointer to the lpfc_node_list structure. 710 * If ndlp is NULL Remove all active RRQs for this vport from the 711 * phba->active_rrq_list and clear the rrq. 712 * If ndlp is not NULL then only remove rrqs for this vport & this ndlp. 713 **/ 714 void 715 lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 716 717 { 718 struct lpfc_hba *phba = vport->phba; 719 struct lpfc_node_rrq *rrq; 720 struct lpfc_node_rrq *nextrrq; 721 unsigned long iflags; 722 LIST_HEAD(rrq_list); 723 724 if (phba->sli_rev != LPFC_SLI_REV4) 725 return; 726 if (!ndlp) { 727 lpfc_sli4_vport_delete_els_xri_aborted(vport); 728 lpfc_sli4_vport_delete_fcp_xri_aborted(vport); 729 } 730 spin_lock_irqsave(&phba->hbalock, iflags); 731 list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) 732 if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp)) 733 list_move(&rrq->list, &rrq_list); 734 spin_unlock_irqrestore(&phba->hbalock, iflags); 735 736 list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) { 737 list_del(&rrq->list); 738 lpfc_clr_rrq_active(phba, rrq->xritag, rrq); 739 } 740 } 741 742 /** 743 * lpfc_cleanup_wt_rrqs - Remove all rrq's from the active list. 744 * @phba: Pointer to HBA context object. 745 * 746 * Remove all rrqs from the phba->active_rrq_list and free them by 747 * calling __lpfc_clr_active_rrq 748 * 749 **/ 750 void 751 lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba) 752 { 753 struct lpfc_node_rrq *rrq; 754 struct lpfc_node_rrq *nextrrq; 755 unsigned long next_time; 756 unsigned long iflags; 757 LIST_HEAD(rrq_list); 758 759 if (phba->sli_rev != LPFC_SLI_REV4) 760 return; 761 spin_lock_irqsave(&phba->hbalock, iflags); 762 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 763 next_time = jiffies + HZ * (phba->fc_ratov * 2); 764 list_splice_init(&phba->active_rrq_list, &rrq_list); 765 spin_unlock_irqrestore(&phba->hbalock, iflags); 766 767 list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) { 768 list_del(&rrq->list); 769 lpfc_clr_rrq_active(phba, rrq->xritag, rrq); 770 } 771 if (!list_empty(&phba->active_rrq_list)) 772 mod_timer(&phba->rrq_tmr, next_time); 773 } 774 775 776 /** 777 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap. 778 * @phba: Pointer to HBA context object. 779 * @ndlp: Targets nodelist pointer for this exchange. 780 * @xritag the xri in the bitmap to test. 781 * 782 * This function is called with hbalock held. This function 783 * returns 0 = rrq not active for this xri 784 * 1 = rrq is valid for this xri. 785 **/ 786 int 787 lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, 788 uint16_t xritag) 789 { 790 if (!ndlp) 791 return 0; 792 if (test_bit(xritag, ndlp->active_rrqs.xri_bitmap)) 793 return 1; 794 else 795 return 0; 796 } 797 798 /** 799 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap. 800 * @phba: Pointer to HBA context object. 801 * @ndlp: nodelist pointer for this target. 802 * @xritag: xri used in this exchange. 803 * @rxid: Remote Exchange ID. 804 * @send_rrq: Flag used to determine if we should send rrq els cmd. 805 * 806 * This function takes the hbalock. 807 * The active bit is always set in the active rrq xri_bitmap even 808 * if there is no slot avaiable for the other rrq information. 809 * 810 * returns 0 rrq actived for this xri 811 * < 0 No memory or invalid ndlp. 812 **/ 813 int 814 lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, 815 uint16_t xritag, uint16_t rxid, uint16_t send_rrq) 816 { 817 int ret; 818 unsigned long iflags; 819 820 spin_lock_irqsave(&phba->hbalock, iflags); 821 ret = __lpfc_set_rrq_active(phba, ndlp, xritag, rxid, send_rrq); 822 spin_unlock_irqrestore(&phba->hbalock, iflags); 823 return ret; 824 } 825 826 /** 827 * __lpfc_sli_get_sglq - Allocates an iocb object from sgl pool 828 * @phba: Pointer to HBA context object. 829 * @piocb: Pointer to the iocbq. 830 * 831 * This function is called with hbalock held. This function 832 * gets a new driver sglq object from the sglq list. If the 833 * list is not empty then it is successful, it returns pointer to the newly 834 * allocated sglq object else it returns NULL. 835 **/ 836 static struct lpfc_sglq * 837 __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq) 838 { 839 struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list; 840 struct lpfc_sglq *sglq = NULL; 841 struct lpfc_sglq *start_sglq = NULL; 842 struct lpfc_scsi_buf *lpfc_cmd; 843 struct lpfc_nodelist *ndlp; 844 int found = 0; 845 846 if (piocbq->iocb_flag & LPFC_IO_FCP) { 847 lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1; 848 ndlp = lpfc_cmd->rdata->pnode; 849 } else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) && 850 !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) 851 ndlp = piocbq->context_un.ndlp; 852 else 853 ndlp = piocbq->context1; 854 855 list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list); 856 start_sglq = sglq; 857 while (!found) { 858 if (!sglq) 859 return NULL; 860 if (lpfc_test_rrq_active(phba, ndlp, sglq->sli4_xritag)) { 861 /* This xri has an rrq outstanding for this DID. 862 * put it back in the list and get another xri. 863 */ 864 list_add_tail(&sglq->list, lpfc_sgl_list); 865 sglq = NULL; 866 list_remove_head(lpfc_sgl_list, sglq, 867 struct lpfc_sglq, list); 868 if (sglq == start_sglq) { 869 sglq = NULL; 870 break; 871 } else 872 continue; 873 } 874 sglq->ndlp = ndlp; 875 found = 1; 876 phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq; 877 sglq->state = SGL_ALLOCATED; 878 } 879 return sglq; 880 } 881 882 /** 883 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool 884 * @phba: Pointer to HBA context object. 885 * 886 * This function is called with no lock held. This function 887 * allocates a new driver iocb object from the iocb pool. If the 888 * allocation is successful, it returns pointer to the newly 889 * allocated iocb object else it returns NULL. 890 **/ 891 struct lpfc_iocbq * 892 lpfc_sli_get_iocbq(struct lpfc_hba *phba) 893 { 894 struct lpfc_iocbq * iocbq = NULL; 895 unsigned long iflags; 896 897 spin_lock_irqsave(&phba->hbalock, iflags); 898 iocbq = __lpfc_sli_get_iocbq(phba); 899 spin_unlock_irqrestore(&phba->hbalock, iflags); 900 return iocbq; 901 } 902 903 /** 904 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool 905 * @phba: Pointer to HBA context object. 906 * @iocbq: Pointer to driver iocb object. 907 * 908 * This function is called with hbalock held to release driver 909 * iocb object to the iocb pool. The iotag in the iocb object 910 * does not change for each use of the iocb object. This function 911 * clears all other fields of the iocb object when it is freed. 912 * The sqlq structure that holds the xritag and phys and virtual 913 * mappings for the scatter gather list is retrieved from the 914 * active array of sglq. The get of the sglq pointer also clears 915 * the entry in the array. If the status of the IO indiactes that 916 * this IO was aborted then the sglq entry it put on the 917 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the 918 * IO has good status or fails for any other reason then the sglq 919 * entry is added to the free list (lpfc_sgl_list). 920 **/ 921 static void 922 __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 923 { 924 struct lpfc_sglq *sglq; 925 size_t start_clean = offsetof(struct lpfc_iocbq, iocb); 926 unsigned long iflag = 0; 927 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 928 929 if (iocbq->sli4_xritag == NO_XRI) 930 sglq = NULL; 931 else 932 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag); 933 934 if (sglq) { 935 if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) && 936 (sglq->state != SGL_XRI_ABORTED)) { 937 spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock, 938 iflag); 939 list_add(&sglq->list, 940 &phba->sli4_hba.lpfc_abts_els_sgl_list); 941 spin_unlock_irqrestore( 942 &phba->sli4_hba.abts_sgl_list_lock, iflag); 943 } else { 944 sglq->state = SGL_FREED; 945 sglq->ndlp = NULL; 946 list_add_tail(&sglq->list, 947 &phba->sli4_hba.lpfc_sgl_list); 948 949 /* Check if TXQ queue needs to be serviced */ 950 if (pring->txq_cnt) 951 lpfc_worker_wake_up(phba); 952 } 953 } 954 955 956 /* 957 * Clean all volatile data fields, preserve iotag and node struct. 958 */ 959 memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean); 960 iocbq->sli4_lxritag = NO_XRI; 961 iocbq->sli4_xritag = NO_XRI; 962 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); 963 } 964 965 966 /** 967 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool 968 * @phba: Pointer to HBA context object. 969 * @iocbq: Pointer to driver iocb object. 970 * 971 * This function is called with hbalock held to release driver 972 * iocb object to the iocb pool. The iotag in the iocb object 973 * does not change for each use of the iocb object. This function 974 * clears all other fields of the iocb object when it is freed. 975 **/ 976 static void 977 __lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 978 { 979 size_t start_clean = offsetof(struct lpfc_iocbq, iocb); 980 981 /* 982 * Clean all volatile data fields, preserve iotag and node struct. 983 */ 984 memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean); 985 iocbq->sli4_xritag = NO_XRI; 986 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); 987 } 988 989 /** 990 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool 991 * @phba: Pointer to HBA context object. 992 * @iocbq: Pointer to driver iocb object. 993 * 994 * This function is called with hbalock held to release driver 995 * iocb object to the iocb pool. The iotag in the iocb object 996 * does not change for each use of the iocb object. This function 997 * clears all other fields of the iocb object when it is freed. 998 **/ 999 static void 1000 __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 1001 { 1002 phba->__lpfc_sli_release_iocbq(phba, iocbq); 1003 phba->iocb_cnt--; 1004 } 1005 1006 /** 1007 * lpfc_sli_release_iocbq - Release iocb to the iocb pool 1008 * @phba: Pointer to HBA context object. 1009 * @iocbq: Pointer to driver iocb object. 1010 * 1011 * This function is called with no lock held to release the iocb to 1012 * iocb pool. 1013 **/ 1014 void 1015 lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 1016 { 1017 unsigned long iflags; 1018 1019 /* 1020 * Clean all volatile data fields, preserve iotag and node struct. 1021 */ 1022 spin_lock_irqsave(&phba->hbalock, iflags); 1023 __lpfc_sli_release_iocbq(phba, iocbq); 1024 spin_unlock_irqrestore(&phba->hbalock, iflags); 1025 } 1026 1027 /** 1028 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list. 1029 * @phba: Pointer to HBA context object. 1030 * @iocblist: List of IOCBs. 1031 * @ulpstatus: ULP status in IOCB command field. 1032 * @ulpWord4: ULP word-4 in IOCB command field. 1033 * 1034 * This function is called with a list of IOCBs to cancel. It cancels the IOCB 1035 * on the list by invoking the complete callback function associated with the 1036 * IOCB with the provided @ulpstatus and @ulpword4 set to the IOCB commond 1037 * fields. 1038 **/ 1039 void 1040 lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist, 1041 uint32_t ulpstatus, uint32_t ulpWord4) 1042 { 1043 struct lpfc_iocbq *piocb; 1044 1045 while (!list_empty(iocblist)) { 1046 list_remove_head(iocblist, piocb, struct lpfc_iocbq, list); 1047 1048 if (!piocb->iocb_cmpl) 1049 lpfc_sli_release_iocbq(phba, piocb); 1050 else { 1051 piocb->iocb.ulpStatus = ulpstatus; 1052 piocb->iocb.un.ulpWord[4] = ulpWord4; 1053 (piocb->iocb_cmpl) (phba, piocb, piocb); 1054 } 1055 } 1056 return; 1057 } 1058 1059 /** 1060 * lpfc_sli_iocb_cmd_type - Get the iocb type 1061 * @iocb_cmnd: iocb command code. 1062 * 1063 * This function is called by ring event handler function to get the iocb type. 1064 * This function translates the iocb command to an iocb command type used to 1065 * decide the final disposition of each completed IOCB. 1066 * The function returns 1067 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb 1068 * LPFC_SOL_IOCB if it is a solicited iocb completion 1069 * LPFC_ABORT_IOCB if it is an abort iocb 1070 * LPFC_UNSOL_IOCB if it is an unsolicited iocb 1071 * 1072 * The caller is not required to hold any lock. 1073 **/ 1074 static lpfc_iocb_type 1075 lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd) 1076 { 1077 lpfc_iocb_type type = LPFC_UNKNOWN_IOCB; 1078 1079 if (iocb_cmnd > CMD_MAX_IOCB_CMD) 1080 return 0; 1081 1082 switch (iocb_cmnd) { 1083 case CMD_XMIT_SEQUENCE_CR: 1084 case CMD_XMIT_SEQUENCE_CX: 1085 case CMD_XMIT_BCAST_CN: 1086 case CMD_XMIT_BCAST_CX: 1087 case CMD_ELS_REQUEST_CR: 1088 case CMD_ELS_REQUEST_CX: 1089 case CMD_CREATE_XRI_CR: 1090 case CMD_CREATE_XRI_CX: 1091 case CMD_GET_RPI_CN: 1092 case CMD_XMIT_ELS_RSP_CX: 1093 case CMD_GET_RPI_CR: 1094 case CMD_FCP_IWRITE_CR: 1095 case CMD_FCP_IWRITE_CX: 1096 case CMD_FCP_IREAD_CR: 1097 case CMD_FCP_IREAD_CX: 1098 case CMD_FCP_ICMND_CR: 1099 case CMD_FCP_ICMND_CX: 1100 case CMD_FCP_TSEND_CX: 1101 case CMD_FCP_TRSP_CX: 1102 case CMD_FCP_TRECEIVE_CX: 1103 case CMD_FCP_AUTO_TRSP_CX: 1104 case CMD_ADAPTER_MSG: 1105 case CMD_ADAPTER_DUMP: 1106 case CMD_XMIT_SEQUENCE64_CR: 1107 case CMD_XMIT_SEQUENCE64_CX: 1108 case CMD_XMIT_BCAST64_CN: 1109 case CMD_XMIT_BCAST64_CX: 1110 case CMD_ELS_REQUEST64_CR: 1111 case CMD_ELS_REQUEST64_CX: 1112 case CMD_FCP_IWRITE64_CR: 1113 case CMD_FCP_IWRITE64_CX: 1114 case CMD_FCP_IREAD64_CR: 1115 case CMD_FCP_IREAD64_CX: 1116 case CMD_FCP_ICMND64_CR: 1117 case CMD_FCP_ICMND64_CX: 1118 case CMD_FCP_TSEND64_CX: 1119 case CMD_FCP_TRSP64_CX: 1120 case CMD_FCP_TRECEIVE64_CX: 1121 case CMD_GEN_REQUEST64_CR: 1122 case CMD_GEN_REQUEST64_CX: 1123 case CMD_XMIT_ELS_RSP64_CX: 1124 case DSSCMD_IWRITE64_CR: 1125 case DSSCMD_IWRITE64_CX: 1126 case DSSCMD_IREAD64_CR: 1127 case DSSCMD_IREAD64_CX: 1128 type = LPFC_SOL_IOCB; 1129 break; 1130 case CMD_ABORT_XRI_CN: 1131 case CMD_ABORT_XRI_CX: 1132 case CMD_CLOSE_XRI_CN: 1133 case CMD_CLOSE_XRI_CX: 1134 case CMD_XRI_ABORTED_CX: 1135 case CMD_ABORT_MXRI64_CN: 1136 case CMD_XMIT_BLS_RSP64_CX: 1137 type = LPFC_ABORT_IOCB; 1138 break; 1139 case CMD_RCV_SEQUENCE_CX: 1140 case CMD_RCV_ELS_REQ_CX: 1141 case CMD_RCV_SEQUENCE64_CX: 1142 case CMD_RCV_ELS_REQ64_CX: 1143 case CMD_ASYNC_STATUS: 1144 case CMD_IOCB_RCV_SEQ64_CX: 1145 case CMD_IOCB_RCV_ELS64_CX: 1146 case CMD_IOCB_RCV_CONT64_CX: 1147 case CMD_IOCB_RET_XRI64_CX: 1148 type = LPFC_UNSOL_IOCB; 1149 break; 1150 case CMD_IOCB_XMIT_MSEQ64_CR: 1151 case CMD_IOCB_XMIT_MSEQ64_CX: 1152 case CMD_IOCB_RCV_SEQ_LIST64_CX: 1153 case CMD_IOCB_RCV_ELS_LIST64_CX: 1154 case CMD_IOCB_CLOSE_EXTENDED_CN: 1155 case CMD_IOCB_ABORT_EXTENDED_CN: 1156 case CMD_IOCB_RET_HBQE64_CN: 1157 case CMD_IOCB_FCP_IBIDIR64_CR: 1158 case CMD_IOCB_FCP_IBIDIR64_CX: 1159 case CMD_IOCB_FCP_ITASKMGT64_CX: 1160 case CMD_IOCB_LOGENTRY_CN: 1161 case CMD_IOCB_LOGENTRY_ASYNC_CN: 1162 printk("%s - Unhandled SLI-3 Command x%x\n", 1163 __func__, iocb_cmnd); 1164 type = LPFC_UNKNOWN_IOCB; 1165 break; 1166 default: 1167 type = LPFC_UNKNOWN_IOCB; 1168 break; 1169 } 1170 1171 return type; 1172 } 1173 1174 /** 1175 * lpfc_sli_ring_map - Issue config_ring mbox for all rings 1176 * @phba: Pointer to HBA context object. 1177 * 1178 * This function is called from SLI initialization code 1179 * to configure every ring of the HBA's SLI interface. The 1180 * caller is not required to hold any lock. This function issues 1181 * a config_ring mailbox command for each ring. 1182 * This function returns zero if successful else returns a negative 1183 * error code. 1184 **/ 1185 static int 1186 lpfc_sli_ring_map(struct lpfc_hba *phba) 1187 { 1188 struct lpfc_sli *psli = &phba->sli; 1189 LPFC_MBOXQ_t *pmb; 1190 MAILBOX_t *pmbox; 1191 int i, rc, ret = 0; 1192 1193 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1194 if (!pmb) 1195 return -ENOMEM; 1196 pmbox = &pmb->u.mb; 1197 phba->link_state = LPFC_INIT_MBX_CMDS; 1198 for (i = 0; i < psli->num_rings; i++) { 1199 lpfc_config_ring(phba, i, pmb); 1200 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 1201 if (rc != MBX_SUCCESS) { 1202 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1203 "0446 Adapter failed to init (%d), " 1204 "mbxCmd x%x CFG_RING, mbxStatus x%x, " 1205 "ring %d\n", 1206 rc, pmbox->mbxCommand, 1207 pmbox->mbxStatus, i); 1208 phba->link_state = LPFC_HBA_ERROR; 1209 ret = -ENXIO; 1210 break; 1211 } 1212 } 1213 mempool_free(pmb, phba->mbox_mem_pool); 1214 return ret; 1215 } 1216 1217 /** 1218 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq 1219 * @phba: Pointer to HBA context object. 1220 * @pring: Pointer to driver SLI ring object. 1221 * @piocb: Pointer to the driver iocb object. 1222 * 1223 * This function is called with hbalock held. The function adds the 1224 * new iocb to txcmplq of the given ring. This function always returns 1225 * 0. If this function is called for ELS ring, this function checks if 1226 * there is a vport associated with the ELS command. This function also 1227 * starts els_tmofunc timer if this is an ELS command. 1228 **/ 1229 static int 1230 lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 1231 struct lpfc_iocbq *piocb) 1232 { 1233 list_add_tail(&piocb->list, &pring->txcmplq); 1234 piocb->iocb_flag |= LPFC_IO_ON_Q; 1235 pring->txcmplq_cnt++; 1236 if (pring->txcmplq_cnt > pring->txcmplq_max) 1237 pring->txcmplq_max = pring->txcmplq_cnt; 1238 1239 if ((unlikely(pring->ringno == LPFC_ELS_RING)) && 1240 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && 1241 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) { 1242 if (!piocb->vport) 1243 BUG(); 1244 else 1245 mod_timer(&piocb->vport->els_tmofunc, 1246 jiffies + HZ * (phba->fc_ratov << 1)); 1247 } 1248 1249 1250 return 0; 1251 } 1252 1253 /** 1254 * lpfc_sli_ringtx_get - Get first element of the txq 1255 * @phba: Pointer to HBA context object. 1256 * @pring: Pointer to driver SLI ring object. 1257 * 1258 * This function is called with hbalock held to get next 1259 * iocb in txq of the given ring. If there is any iocb in 1260 * the txq, the function returns first iocb in the list after 1261 * removing the iocb from the list, else it returns NULL. 1262 **/ 1263 struct lpfc_iocbq * 1264 lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1265 { 1266 struct lpfc_iocbq *cmd_iocb; 1267 1268 list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list); 1269 if (cmd_iocb != NULL) 1270 pring->txq_cnt--; 1271 return cmd_iocb; 1272 } 1273 1274 /** 1275 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring 1276 * @phba: Pointer to HBA context object. 1277 * @pring: Pointer to driver SLI ring object. 1278 * 1279 * This function is called with hbalock held and the caller must post the 1280 * iocb without releasing the lock. If the caller releases the lock, 1281 * iocb slot returned by the function is not guaranteed to be available. 1282 * The function returns pointer to the next available iocb slot if there 1283 * is available slot in the ring, else it returns NULL. 1284 * If the get index of the ring is ahead of the put index, the function 1285 * will post an error attention event to the worker thread to take the 1286 * HBA to offline state. 1287 **/ 1288 static IOCB_t * 1289 lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1290 { 1291 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; 1292 uint32_t max_cmd_idx = pring->numCiocb; 1293 if ((pring->next_cmdidx == pring->cmdidx) && 1294 (++pring->next_cmdidx >= max_cmd_idx)) 1295 pring->next_cmdidx = 0; 1296 1297 if (unlikely(pring->local_getidx == pring->next_cmdidx)) { 1298 1299 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx); 1300 1301 if (unlikely(pring->local_getidx >= max_cmd_idx)) { 1302 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 1303 "0315 Ring %d issue: portCmdGet %d " 1304 "is bigger than cmd ring %d\n", 1305 pring->ringno, 1306 pring->local_getidx, max_cmd_idx); 1307 1308 phba->link_state = LPFC_HBA_ERROR; 1309 /* 1310 * All error attention handlers are posted to 1311 * worker thread 1312 */ 1313 phba->work_ha |= HA_ERATT; 1314 phba->work_hs = HS_FFER3; 1315 1316 lpfc_worker_wake_up(phba); 1317 1318 return NULL; 1319 } 1320 1321 if (pring->local_getidx == pring->next_cmdidx) 1322 return NULL; 1323 } 1324 1325 return lpfc_cmd_iocb(phba, pring); 1326 } 1327 1328 /** 1329 * lpfc_sli_next_iotag - Get an iotag for the iocb 1330 * @phba: Pointer to HBA context object. 1331 * @iocbq: Pointer to driver iocb object. 1332 * 1333 * This function gets an iotag for the iocb. If there is no unused iotag and 1334 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup 1335 * array and assigns a new iotag. 1336 * The function returns the allocated iotag if successful, else returns zero. 1337 * Zero is not a valid iotag. 1338 * The caller is not required to hold any lock. 1339 **/ 1340 uint16_t 1341 lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 1342 { 1343 struct lpfc_iocbq **new_arr; 1344 struct lpfc_iocbq **old_arr; 1345 size_t new_len; 1346 struct lpfc_sli *psli = &phba->sli; 1347 uint16_t iotag; 1348 1349 spin_lock_irq(&phba->hbalock); 1350 iotag = psli->last_iotag; 1351 if(++iotag < psli->iocbq_lookup_len) { 1352 psli->last_iotag = iotag; 1353 psli->iocbq_lookup[iotag] = iocbq; 1354 spin_unlock_irq(&phba->hbalock); 1355 iocbq->iotag = iotag; 1356 return iotag; 1357 } else if (psli->iocbq_lookup_len < (0xffff 1358 - LPFC_IOCBQ_LOOKUP_INCREMENT)) { 1359 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT; 1360 spin_unlock_irq(&phba->hbalock); 1361 new_arr = kzalloc(new_len * sizeof (struct lpfc_iocbq *), 1362 GFP_KERNEL); 1363 if (new_arr) { 1364 spin_lock_irq(&phba->hbalock); 1365 old_arr = psli->iocbq_lookup; 1366 if (new_len <= psli->iocbq_lookup_len) { 1367 /* highly unprobable case */ 1368 kfree(new_arr); 1369 iotag = psli->last_iotag; 1370 if(++iotag < psli->iocbq_lookup_len) { 1371 psli->last_iotag = iotag; 1372 psli->iocbq_lookup[iotag] = iocbq; 1373 spin_unlock_irq(&phba->hbalock); 1374 iocbq->iotag = iotag; 1375 return iotag; 1376 } 1377 spin_unlock_irq(&phba->hbalock); 1378 return 0; 1379 } 1380 if (psli->iocbq_lookup) 1381 memcpy(new_arr, old_arr, 1382 ((psli->last_iotag + 1) * 1383 sizeof (struct lpfc_iocbq *))); 1384 psli->iocbq_lookup = new_arr; 1385 psli->iocbq_lookup_len = new_len; 1386 psli->last_iotag = iotag; 1387 psli->iocbq_lookup[iotag] = iocbq; 1388 spin_unlock_irq(&phba->hbalock); 1389 iocbq->iotag = iotag; 1390 kfree(old_arr); 1391 return iotag; 1392 } 1393 } else 1394 spin_unlock_irq(&phba->hbalock); 1395 1396 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 1397 "0318 Failed to allocate IOTAG.last IOTAG is %d\n", 1398 psli->last_iotag); 1399 1400 return 0; 1401 } 1402 1403 /** 1404 * lpfc_sli_submit_iocb - Submit an iocb to the firmware 1405 * @phba: Pointer to HBA context object. 1406 * @pring: Pointer to driver SLI ring object. 1407 * @iocb: Pointer to iocb slot in the ring. 1408 * @nextiocb: Pointer to driver iocb object which need to be 1409 * posted to firmware. 1410 * 1411 * This function is called with hbalock held to post a new iocb to 1412 * the firmware. This function copies the new iocb to ring iocb slot and 1413 * updates the ring pointers. It adds the new iocb to txcmplq if there is 1414 * a completion call back for this iocb else the function will free the 1415 * iocb object. 1416 **/ 1417 static void 1418 lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 1419 IOCB_t *iocb, struct lpfc_iocbq *nextiocb) 1420 { 1421 /* 1422 * Set up an iotag 1423 */ 1424 nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0; 1425 1426 1427 if (pring->ringno == LPFC_ELS_RING) { 1428 lpfc_debugfs_slow_ring_trc(phba, 1429 "IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x", 1430 *(((uint32_t *) &nextiocb->iocb) + 4), 1431 *(((uint32_t *) &nextiocb->iocb) + 6), 1432 *(((uint32_t *) &nextiocb->iocb) + 7)); 1433 } 1434 1435 /* 1436 * Issue iocb command to adapter 1437 */ 1438 lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size); 1439 wmb(); 1440 pring->stats.iocb_cmd++; 1441 1442 /* 1443 * If there is no completion routine to call, we can release the 1444 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF, 1445 * that have no rsp ring completion, iocb_cmpl MUST be NULL. 1446 */ 1447 if (nextiocb->iocb_cmpl) 1448 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb); 1449 else 1450 __lpfc_sli_release_iocbq(phba, nextiocb); 1451 1452 /* 1453 * Let the HBA know what IOCB slot will be the next one the 1454 * driver will put a command into. 1455 */ 1456 pring->cmdidx = pring->next_cmdidx; 1457 writel(pring->cmdidx, &phba->host_gp[pring->ringno].cmdPutInx); 1458 } 1459 1460 /** 1461 * lpfc_sli_update_full_ring - Update the chip attention register 1462 * @phba: Pointer to HBA context object. 1463 * @pring: Pointer to driver SLI ring object. 1464 * 1465 * The caller is not required to hold any lock for calling this function. 1466 * This function updates the chip attention bits for the ring to inform firmware 1467 * that there are pending work to be done for this ring and requests an 1468 * interrupt when there is space available in the ring. This function is 1469 * called when the driver is unable to post more iocbs to the ring due 1470 * to unavailability of space in the ring. 1471 **/ 1472 static void 1473 lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1474 { 1475 int ringno = pring->ringno; 1476 1477 pring->flag |= LPFC_CALL_RING_AVAILABLE; 1478 1479 wmb(); 1480 1481 /* 1482 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register. 1483 * The HBA will tell us when an IOCB entry is available. 1484 */ 1485 writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr); 1486 readl(phba->CAregaddr); /* flush */ 1487 1488 pring->stats.iocb_cmd_full++; 1489 } 1490 1491 /** 1492 * lpfc_sli_update_ring - Update chip attention register 1493 * @phba: Pointer to HBA context object. 1494 * @pring: Pointer to driver SLI ring object. 1495 * 1496 * This function updates the chip attention register bit for the 1497 * given ring to inform HBA that there is more work to be done 1498 * in this ring. The caller is not required to hold any lock. 1499 **/ 1500 static void 1501 lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1502 { 1503 int ringno = pring->ringno; 1504 1505 /* 1506 * Tell the HBA that there is work to do in this ring. 1507 */ 1508 if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) { 1509 wmb(); 1510 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr); 1511 readl(phba->CAregaddr); /* flush */ 1512 } 1513 } 1514 1515 /** 1516 * lpfc_sli_resume_iocb - Process iocbs in the txq 1517 * @phba: Pointer to HBA context object. 1518 * @pring: Pointer to driver SLI ring object. 1519 * 1520 * This function is called with hbalock held to post pending iocbs 1521 * in the txq to the firmware. This function is called when driver 1522 * detects space available in the ring. 1523 **/ 1524 static void 1525 lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1526 { 1527 IOCB_t *iocb; 1528 struct lpfc_iocbq *nextiocb; 1529 1530 /* 1531 * Check to see if: 1532 * (a) there is anything on the txq to send 1533 * (b) link is up 1534 * (c) link attention events can be processed (fcp ring only) 1535 * (d) IOCB processing is not blocked by the outstanding mbox command. 1536 */ 1537 if (pring->txq_cnt && 1538 lpfc_is_link_up(phba) && 1539 (pring->ringno != phba->sli.fcp_ring || 1540 phba->sli.sli_flag & LPFC_PROCESS_LA)) { 1541 1542 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && 1543 (nextiocb = lpfc_sli_ringtx_get(phba, pring))) 1544 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb); 1545 1546 if (iocb) 1547 lpfc_sli_update_ring(phba, pring); 1548 else 1549 lpfc_sli_update_full_ring(phba, pring); 1550 } 1551 1552 return; 1553 } 1554 1555 /** 1556 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ 1557 * @phba: Pointer to HBA context object. 1558 * @hbqno: HBQ number. 1559 * 1560 * This function is called with hbalock held to get the next 1561 * available slot for the given HBQ. If there is free slot 1562 * available for the HBQ it will return pointer to the next available 1563 * HBQ entry else it will return NULL. 1564 **/ 1565 static struct lpfc_hbq_entry * 1566 lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno) 1567 { 1568 struct hbq_s *hbqp = &phba->hbqs[hbqno]; 1569 1570 if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx && 1571 ++hbqp->next_hbqPutIdx >= hbqp->entry_count) 1572 hbqp->next_hbqPutIdx = 0; 1573 1574 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) { 1575 uint32_t raw_index = phba->hbq_get[hbqno]; 1576 uint32_t getidx = le32_to_cpu(raw_index); 1577 1578 hbqp->local_hbqGetIdx = getidx; 1579 1580 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) { 1581 lpfc_printf_log(phba, KERN_ERR, 1582 LOG_SLI | LOG_VPORT, 1583 "1802 HBQ %d: local_hbqGetIdx " 1584 "%u is > than hbqp->entry_count %u\n", 1585 hbqno, hbqp->local_hbqGetIdx, 1586 hbqp->entry_count); 1587 1588 phba->link_state = LPFC_HBA_ERROR; 1589 return NULL; 1590 } 1591 1592 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx) 1593 return NULL; 1594 } 1595 1596 return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt + 1597 hbqp->hbqPutIdx; 1598 } 1599 1600 /** 1601 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers 1602 * @phba: Pointer to HBA context object. 1603 * 1604 * This function is called with no lock held to free all the 1605 * hbq buffers while uninitializing the SLI interface. It also 1606 * frees the HBQ buffers returned by the firmware but not yet 1607 * processed by the upper layers. 1608 **/ 1609 void 1610 lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba) 1611 { 1612 struct lpfc_dmabuf *dmabuf, *next_dmabuf; 1613 struct hbq_dmabuf *hbq_buf; 1614 unsigned long flags; 1615 int i, hbq_count; 1616 uint32_t hbqno; 1617 1618 hbq_count = lpfc_sli_hbq_count(); 1619 /* Return all memory used by all HBQs */ 1620 spin_lock_irqsave(&phba->hbalock, flags); 1621 for (i = 0; i < hbq_count; ++i) { 1622 list_for_each_entry_safe(dmabuf, next_dmabuf, 1623 &phba->hbqs[i].hbq_buffer_list, list) { 1624 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf); 1625 list_del(&hbq_buf->dbuf.list); 1626 (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf); 1627 } 1628 phba->hbqs[i].buffer_count = 0; 1629 } 1630 /* Return all HBQ buffer that are in-fly */ 1631 list_for_each_entry_safe(dmabuf, next_dmabuf, &phba->rb_pend_list, 1632 list) { 1633 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf); 1634 list_del(&hbq_buf->dbuf.list); 1635 if (hbq_buf->tag == -1) { 1636 (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer) 1637 (phba, hbq_buf); 1638 } else { 1639 hbqno = hbq_buf->tag >> 16; 1640 if (hbqno >= LPFC_MAX_HBQS) 1641 (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer) 1642 (phba, hbq_buf); 1643 else 1644 (phba->hbqs[hbqno].hbq_free_buffer)(phba, 1645 hbq_buf); 1646 } 1647 } 1648 1649 /* Mark the HBQs not in use */ 1650 phba->hbq_in_use = 0; 1651 spin_unlock_irqrestore(&phba->hbalock, flags); 1652 } 1653 1654 /** 1655 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware 1656 * @phba: Pointer to HBA context object. 1657 * @hbqno: HBQ number. 1658 * @hbq_buf: Pointer to HBQ buffer. 1659 * 1660 * This function is called with the hbalock held to post a 1661 * hbq buffer to the firmware. If the function finds an empty 1662 * slot in the HBQ, it will post the buffer. The function will return 1663 * pointer to the hbq entry if it successfully post the buffer 1664 * else it will return NULL. 1665 **/ 1666 static int 1667 lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno, 1668 struct hbq_dmabuf *hbq_buf) 1669 { 1670 return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf); 1671 } 1672 1673 /** 1674 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware 1675 * @phba: Pointer to HBA context object. 1676 * @hbqno: HBQ number. 1677 * @hbq_buf: Pointer to HBQ buffer. 1678 * 1679 * This function is called with the hbalock held to post a hbq buffer to the 1680 * firmware. If the function finds an empty slot in the HBQ, it will post the 1681 * buffer and place it on the hbq_buffer_list. The function will return zero if 1682 * it successfully post the buffer else it will return an error. 1683 **/ 1684 static int 1685 lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno, 1686 struct hbq_dmabuf *hbq_buf) 1687 { 1688 struct lpfc_hbq_entry *hbqe; 1689 dma_addr_t physaddr = hbq_buf->dbuf.phys; 1690 1691 /* Get next HBQ entry slot to use */ 1692 hbqe = lpfc_sli_next_hbq_slot(phba, hbqno); 1693 if (hbqe) { 1694 struct hbq_s *hbqp = &phba->hbqs[hbqno]; 1695 1696 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr)); 1697 hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr)); 1698 hbqe->bde.tus.f.bdeSize = hbq_buf->size; 1699 hbqe->bde.tus.f.bdeFlags = 0; 1700 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w); 1701 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag); 1702 /* Sync SLIM */ 1703 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx; 1704 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno); 1705 /* flush */ 1706 readl(phba->hbq_put + hbqno); 1707 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list); 1708 return 0; 1709 } else 1710 return -ENOMEM; 1711 } 1712 1713 /** 1714 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware 1715 * @phba: Pointer to HBA context object. 1716 * @hbqno: HBQ number. 1717 * @hbq_buf: Pointer to HBQ buffer. 1718 * 1719 * This function is called with the hbalock held to post an RQE to the SLI4 1720 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to 1721 * the hbq_buffer_list and return zero, otherwise it will return an error. 1722 **/ 1723 static int 1724 lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno, 1725 struct hbq_dmabuf *hbq_buf) 1726 { 1727 int rc; 1728 struct lpfc_rqe hrqe; 1729 struct lpfc_rqe drqe; 1730 1731 hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys); 1732 hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys); 1733 drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys); 1734 drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys); 1735 rc = lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, 1736 &hrqe, &drqe); 1737 if (rc < 0) 1738 return rc; 1739 hbq_buf->tag = rc; 1740 list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list); 1741 return 0; 1742 } 1743 1744 /* HBQ for ELS and CT traffic. */ 1745 static struct lpfc_hbq_init lpfc_els_hbq = { 1746 .rn = 1, 1747 .entry_count = 256, 1748 .mask_count = 0, 1749 .profile = 0, 1750 .ring_mask = (1 << LPFC_ELS_RING), 1751 .buffer_count = 0, 1752 .init_count = 40, 1753 .add_count = 40, 1754 }; 1755 1756 /* HBQ for the extra ring if needed */ 1757 static struct lpfc_hbq_init lpfc_extra_hbq = { 1758 .rn = 1, 1759 .entry_count = 200, 1760 .mask_count = 0, 1761 .profile = 0, 1762 .ring_mask = (1 << LPFC_EXTRA_RING), 1763 .buffer_count = 0, 1764 .init_count = 0, 1765 .add_count = 5, 1766 }; 1767 1768 /* Array of HBQs */ 1769 struct lpfc_hbq_init *lpfc_hbq_defs[] = { 1770 &lpfc_els_hbq, 1771 &lpfc_extra_hbq, 1772 }; 1773 1774 /** 1775 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ 1776 * @phba: Pointer to HBA context object. 1777 * @hbqno: HBQ number. 1778 * @count: Number of HBQ buffers to be posted. 1779 * 1780 * This function is called with no lock held to post more hbq buffers to the 1781 * given HBQ. The function returns the number of HBQ buffers successfully 1782 * posted. 1783 **/ 1784 static int 1785 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count) 1786 { 1787 uint32_t i, posted = 0; 1788 unsigned long flags; 1789 struct hbq_dmabuf *hbq_buffer; 1790 LIST_HEAD(hbq_buf_list); 1791 if (!phba->hbqs[hbqno].hbq_alloc_buffer) 1792 return 0; 1793 1794 if ((phba->hbqs[hbqno].buffer_count + count) > 1795 lpfc_hbq_defs[hbqno]->entry_count) 1796 count = lpfc_hbq_defs[hbqno]->entry_count - 1797 phba->hbqs[hbqno].buffer_count; 1798 if (!count) 1799 return 0; 1800 /* Allocate HBQ entries */ 1801 for (i = 0; i < count; i++) { 1802 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba); 1803 if (!hbq_buffer) 1804 break; 1805 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list); 1806 } 1807 /* Check whether HBQ is still in use */ 1808 spin_lock_irqsave(&phba->hbalock, flags); 1809 if (!phba->hbq_in_use) 1810 goto err; 1811 while (!list_empty(&hbq_buf_list)) { 1812 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf, 1813 dbuf.list); 1814 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count | 1815 (hbqno << 16)); 1816 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) { 1817 phba->hbqs[hbqno].buffer_count++; 1818 posted++; 1819 } else 1820 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 1821 } 1822 spin_unlock_irqrestore(&phba->hbalock, flags); 1823 return posted; 1824 err: 1825 spin_unlock_irqrestore(&phba->hbalock, flags); 1826 while (!list_empty(&hbq_buf_list)) { 1827 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf, 1828 dbuf.list); 1829 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 1830 } 1831 return 0; 1832 } 1833 1834 /** 1835 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware 1836 * @phba: Pointer to HBA context object. 1837 * @qno: HBQ number. 1838 * 1839 * This function posts more buffers to the HBQ. This function 1840 * is called with no lock held. The function returns the number of HBQ entries 1841 * successfully allocated. 1842 **/ 1843 int 1844 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno) 1845 { 1846 if (phba->sli_rev == LPFC_SLI_REV4) 1847 return 0; 1848 else 1849 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 1850 lpfc_hbq_defs[qno]->add_count); 1851 } 1852 1853 /** 1854 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ 1855 * @phba: Pointer to HBA context object. 1856 * @qno: HBQ queue number. 1857 * 1858 * This function is called from SLI initialization code path with 1859 * no lock held to post initial HBQ buffers to firmware. The 1860 * function returns the number of HBQ entries successfully allocated. 1861 **/ 1862 static int 1863 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno) 1864 { 1865 if (phba->sli_rev == LPFC_SLI_REV4) 1866 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 1867 lpfc_hbq_defs[qno]->entry_count); 1868 else 1869 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 1870 lpfc_hbq_defs[qno]->init_count); 1871 } 1872 1873 /** 1874 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list 1875 * @phba: Pointer to HBA context object. 1876 * @hbqno: HBQ number. 1877 * 1878 * This function removes the first hbq buffer on an hbq list and returns a 1879 * pointer to that buffer. If it finds no buffers on the list it returns NULL. 1880 **/ 1881 static struct hbq_dmabuf * 1882 lpfc_sli_hbqbuf_get(struct list_head *rb_list) 1883 { 1884 struct lpfc_dmabuf *d_buf; 1885 1886 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list); 1887 if (!d_buf) 1888 return NULL; 1889 return container_of(d_buf, struct hbq_dmabuf, dbuf); 1890 } 1891 1892 /** 1893 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag 1894 * @phba: Pointer to HBA context object. 1895 * @tag: Tag of the hbq buffer. 1896 * 1897 * This function is called with hbalock held. This function searches 1898 * for the hbq buffer associated with the given tag in the hbq buffer 1899 * list. If it finds the hbq buffer, it returns the hbq_buffer other wise 1900 * it returns NULL. 1901 **/ 1902 static struct hbq_dmabuf * 1903 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag) 1904 { 1905 struct lpfc_dmabuf *d_buf; 1906 struct hbq_dmabuf *hbq_buf; 1907 uint32_t hbqno; 1908 1909 hbqno = tag >> 16; 1910 if (hbqno >= LPFC_MAX_HBQS) 1911 return NULL; 1912 1913 spin_lock_irq(&phba->hbalock); 1914 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) { 1915 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 1916 if (hbq_buf->tag == tag) { 1917 spin_unlock_irq(&phba->hbalock); 1918 return hbq_buf; 1919 } 1920 } 1921 spin_unlock_irq(&phba->hbalock); 1922 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT, 1923 "1803 Bad hbq tag. Data: x%x x%x\n", 1924 tag, phba->hbqs[tag >> 16].buffer_count); 1925 return NULL; 1926 } 1927 1928 /** 1929 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware 1930 * @phba: Pointer to HBA context object. 1931 * @hbq_buffer: Pointer to HBQ buffer. 1932 * 1933 * This function is called with hbalock. This function gives back 1934 * the hbq buffer to firmware. If the HBQ does not have space to 1935 * post the buffer, it will free the buffer. 1936 **/ 1937 void 1938 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer) 1939 { 1940 uint32_t hbqno; 1941 1942 if (hbq_buffer) { 1943 hbqno = hbq_buffer->tag >> 16; 1944 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) 1945 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 1946 } 1947 } 1948 1949 /** 1950 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox 1951 * @mbxCommand: mailbox command code. 1952 * 1953 * This function is called by the mailbox event handler function to verify 1954 * that the completed mailbox command is a legitimate mailbox command. If the 1955 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN 1956 * and the mailbox event handler will take the HBA offline. 1957 **/ 1958 static int 1959 lpfc_sli_chk_mbx_command(uint8_t mbxCommand) 1960 { 1961 uint8_t ret; 1962 1963 switch (mbxCommand) { 1964 case MBX_LOAD_SM: 1965 case MBX_READ_NV: 1966 case MBX_WRITE_NV: 1967 case MBX_WRITE_VPARMS: 1968 case MBX_RUN_BIU_DIAG: 1969 case MBX_INIT_LINK: 1970 case MBX_DOWN_LINK: 1971 case MBX_CONFIG_LINK: 1972 case MBX_CONFIG_RING: 1973 case MBX_RESET_RING: 1974 case MBX_READ_CONFIG: 1975 case MBX_READ_RCONFIG: 1976 case MBX_READ_SPARM: 1977 case MBX_READ_STATUS: 1978 case MBX_READ_RPI: 1979 case MBX_READ_XRI: 1980 case MBX_READ_REV: 1981 case MBX_READ_LNK_STAT: 1982 case MBX_REG_LOGIN: 1983 case MBX_UNREG_LOGIN: 1984 case MBX_CLEAR_LA: 1985 case MBX_DUMP_MEMORY: 1986 case MBX_DUMP_CONTEXT: 1987 case MBX_RUN_DIAGS: 1988 case MBX_RESTART: 1989 case MBX_UPDATE_CFG: 1990 case MBX_DOWN_LOAD: 1991 case MBX_DEL_LD_ENTRY: 1992 case MBX_RUN_PROGRAM: 1993 case MBX_SET_MASK: 1994 case MBX_SET_VARIABLE: 1995 case MBX_UNREG_D_ID: 1996 case MBX_KILL_BOARD: 1997 case MBX_CONFIG_FARP: 1998 case MBX_BEACON: 1999 case MBX_LOAD_AREA: 2000 case MBX_RUN_BIU_DIAG64: 2001 case MBX_CONFIG_PORT: 2002 case MBX_READ_SPARM64: 2003 case MBX_READ_RPI64: 2004 case MBX_REG_LOGIN64: 2005 case MBX_READ_TOPOLOGY: 2006 case MBX_WRITE_WWN: 2007 case MBX_SET_DEBUG: 2008 case MBX_LOAD_EXP_ROM: 2009 case MBX_ASYNCEVT_ENABLE: 2010 case MBX_REG_VPI: 2011 case MBX_UNREG_VPI: 2012 case MBX_HEARTBEAT: 2013 case MBX_PORT_CAPABILITIES: 2014 case MBX_PORT_IOV_CONTROL: 2015 case MBX_SLI4_CONFIG: 2016 case MBX_SLI4_REQ_FTRS: 2017 case MBX_REG_FCFI: 2018 case MBX_UNREG_FCFI: 2019 case MBX_REG_VFI: 2020 case MBX_UNREG_VFI: 2021 case MBX_INIT_VPI: 2022 case MBX_INIT_VFI: 2023 case MBX_RESUME_RPI: 2024 case MBX_READ_EVENT_LOG_STATUS: 2025 case MBX_READ_EVENT_LOG: 2026 case MBX_SECURITY_MGMT: 2027 case MBX_AUTH_PORT: 2028 ret = mbxCommand; 2029 break; 2030 default: 2031 ret = MBX_SHUTDOWN; 2032 break; 2033 } 2034 return ret; 2035 } 2036 2037 /** 2038 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler 2039 * @phba: Pointer to HBA context object. 2040 * @pmboxq: Pointer to mailbox command. 2041 * 2042 * This is completion handler function for mailbox commands issued from 2043 * lpfc_sli_issue_mbox_wait function. This function is called by the 2044 * mailbox event handler function with no lock held. This function 2045 * will wake up thread waiting on the wait queue pointed by context1 2046 * of the mailbox. 2047 **/ 2048 void 2049 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 2050 { 2051 wait_queue_head_t *pdone_q; 2052 unsigned long drvr_flag; 2053 2054 /* 2055 * If pdone_q is empty, the driver thread gave up waiting and 2056 * continued running. 2057 */ 2058 pmboxq->mbox_flag |= LPFC_MBX_WAKE; 2059 spin_lock_irqsave(&phba->hbalock, drvr_flag); 2060 pdone_q = (wait_queue_head_t *) pmboxq->context1; 2061 if (pdone_q) 2062 wake_up_interruptible(pdone_q); 2063 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 2064 return; 2065 } 2066 2067 2068 /** 2069 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler 2070 * @phba: Pointer to HBA context object. 2071 * @pmb: Pointer to mailbox object. 2072 * 2073 * This function is the default mailbox completion handler. It 2074 * frees the memory resources associated with the completed mailbox 2075 * command. If the completed command is a REG_LOGIN mailbox command, 2076 * this function will issue a UREG_LOGIN to re-claim the RPI. 2077 **/ 2078 void 2079 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 2080 { 2081 struct lpfc_vport *vport = pmb->vport; 2082 struct lpfc_dmabuf *mp; 2083 struct lpfc_nodelist *ndlp; 2084 struct Scsi_Host *shost; 2085 uint16_t rpi, vpi; 2086 int rc; 2087 2088 mp = (struct lpfc_dmabuf *) (pmb->context1); 2089 2090 if (mp) { 2091 lpfc_mbuf_free(phba, mp->virt, mp->phys); 2092 kfree(mp); 2093 } 2094 2095 /* 2096 * If a REG_LOGIN succeeded after node is destroyed or node 2097 * is in re-discovery driver need to cleanup the RPI. 2098 */ 2099 if (!(phba->pport->load_flag & FC_UNLOADING) && 2100 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 && 2101 !pmb->u.mb.mbxStatus) { 2102 rpi = pmb->u.mb.un.varWords[0]; 2103 vpi = pmb->u.mb.un.varRegLogin.vpi; 2104 lpfc_unreg_login(phba, vpi, rpi, pmb); 2105 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 2106 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 2107 if (rc != MBX_NOT_FINISHED) 2108 return; 2109 } 2110 2111 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) && 2112 !(phba->pport->load_flag & FC_UNLOADING) && 2113 !pmb->u.mb.mbxStatus) { 2114 shost = lpfc_shost_from_vport(vport); 2115 spin_lock_irq(shost->host_lock); 2116 vport->vpi_state |= LPFC_VPI_REGISTERED; 2117 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 2118 spin_unlock_irq(shost->host_lock); 2119 } 2120 2121 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 2122 ndlp = (struct lpfc_nodelist *)pmb->context2; 2123 lpfc_nlp_put(ndlp); 2124 pmb->context2 = NULL; 2125 } 2126 2127 /* Check security permission status on INIT_LINK mailbox command */ 2128 if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) && 2129 (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION)) 2130 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 2131 "2860 SLI authentication is required " 2132 "for INIT_LINK but has not done yet\n"); 2133 2134 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG) 2135 lpfc_sli4_mbox_cmd_free(phba, pmb); 2136 else 2137 mempool_free(pmb, phba->mbox_mem_pool); 2138 } 2139 2140 /** 2141 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware 2142 * @phba: Pointer to HBA context object. 2143 * 2144 * This function is called with no lock held. This function processes all 2145 * the completed mailbox commands and gives it to upper layers. The interrupt 2146 * service routine processes mailbox completion interrupt and adds completed 2147 * mailbox commands to the mboxq_cmpl queue and signals the worker thread. 2148 * Worker thread call lpfc_sli_handle_mb_event, which will return the 2149 * completed mailbox commands in mboxq_cmpl queue to the upper layers. This 2150 * function returns the mailbox commands to the upper layer by calling the 2151 * completion handler function of each mailbox. 2152 **/ 2153 int 2154 lpfc_sli_handle_mb_event(struct lpfc_hba *phba) 2155 { 2156 MAILBOX_t *pmbox; 2157 LPFC_MBOXQ_t *pmb; 2158 int rc; 2159 LIST_HEAD(cmplq); 2160 2161 phba->sli.slistat.mbox_event++; 2162 2163 /* Get all completed mailboxe buffers into the cmplq */ 2164 spin_lock_irq(&phba->hbalock); 2165 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq); 2166 spin_unlock_irq(&phba->hbalock); 2167 2168 /* Get a Mailbox buffer to setup mailbox commands for callback */ 2169 do { 2170 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list); 2171 if (pmb == NULL) 2172 break; 2173 2174 pmbox = &pmb->u.mb; 2175 2176 if (pmbox->mbxCommand != MBX_HEARTBEAT) { 2177 if (pmb->vport) { 2178 lpfc_debugfs_disc_trc(pmb->vport, 2179 LPFC_DISC_TRC_MBOX_VPORT, 2180 "MBOX cmpl vport: cmd:x%x mb:x%x x%x", 2181 (uint32_t)pmbox->mbxCommand, 2182 pmbox->un.varWords[0], 2183 pmbox->un.varWords[1]); 2184 } 2185 else { 2186 lpfc_debugfs_disc_trc(phba->pport, 2187 LPFC_DISC_TRC_MBOX, 2188 "MBOX cmpl: cmd:x%x mb:x%x x%x", 2189 (uint32_t)pmbox->mbxCommand, 2190 pmbox->un.varWords[0], 2191 pmbox->un.varWords[1]); 2192 } 2193 } 2194 2195 /* 2196 * It is a fatal error if unknown mbox command completion. 2197 */ 2198 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) == 2199 MBX_SHUTDOWN) { 2200 /* Unknown mailbox command compl */ 2201 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 2202 "(%d):0323 Unknown Mailbox command " 2203 "x%x (x%x) Cmpl\n", 2204 pmb->vport ? pmb->vport->vpi : 0, 2205 pmbox->mbxCommand, 2206 lpfc_sli4_mbox_opcode_get(phba, pmb)); 2207 phba->link_state = LPFC_HBA_ERROR; 2208 phba->work_hs = HS_FFER3; 2209 lpfc_handle_eratt(phba); 2210 continue; 2211 } 2212 2213 if (pmbox->mbxStatus) { 2214 phba->sli.slistat.mbox_stat_err++; 2215 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) { 2216 /* Mbox cmd cmpl error - RETRYing */ 2217 lpfc_printf_log(phba, KERN_INFO, 2218 LOG_MBOX | LOG_SLI, 2219 "(%d):0305 Mbox cmd cmpl " 2220 "error - RETRYing Data: x%x " 2221 "(x%x) x%x x%x x%x\n", 2222 pmb->vport ? pmb->vport->vpi :0, 2223 pmbox->mbxCommand, 2224 lpfc_sli4_mbox_opcode_get(phba, 2225 pmb), 2226 pmbox->mbxStatus, 2227 pmbox->un.varWords[0], 2228 pmb->vport->port_state); 2229 pmbox->mbxStatus = 0; 2230 pmbox->mbxOwner = OWN_HOST; 2231 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 2232 if (rc != MBX_NOT_FINISHED) 2233 continue; 2234 } 2235 } 2236 2237 /* Mailbox cmd <cmd> Cmpl <cmpl> */ 2238 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 2239 "(%d):0307 Mailbox cmd x%x (x%x) Cmpl x%p " 2240 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n", 2241 pmb->vport ? pmb->vport->vpi : 0, 2242 pmbox->mbxCommand, 2243 lpfc_sli4_mbox_opcode_get(phba, pmb), 2244 pmb->mbox_cmpl, 2245 *((uint32_t *) pmbox), 2246 pmbox->un.varWords[0], 2247 pmbox->un.varWords[1], 2248 pmbox->un.varWords[2], 2249 pmbox->un.varWords[3], 2250 pmbox->un.varWords[4], 2251 pmbox->un.varWords[5], 2252 pmbox->un.varWords[6], 2253 pmbox->un.varWords[7]); 2254 2255 if (pmb->mbox_cmpl) 2256 pmb->mbox_cmpl(phba,pmb); 2257 } while (1); 2258 return 0; 2259 } 2260 2261 /** 2262 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag 2263 * @phba: Pointer to HBA context object. 2264 * @pring: Pointer to driver SLI ring object. 2265 * @tag: buffer tag. 2266 * 2267 * This function is called with no lock held. When QUE_BUFTAG_BIT bit 2268 * is set in the tag the buffer is posted for a particular exchange, 2269 * the function will return the buffer without replacing the buffer. 2270 * If the buffer is for unsolicited ELS or CT traffic, this function 2271 * returns the buffer and also posts another buffer to the firmware. 2272 **/ 2273 static struct lpfc_dmabuf * 2274 lpfc_sli_get_buff(struct lpfc_hba *phba, 2275 struct lpfc_sli_ring *pring, 2276 uint32_t tag) 2277 { 2278 struct hbq_dmabuf *hbq_entry; 2279 2280 if (tag & QUE_BUFTAG_BIT) 2281 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag); 2282 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag); 2283 if (!hbq_entry) 2284 return NULL; 2285 return &hbq_entry->dbuf; 2286 } 2287 2288 /** 2289 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence 2290 * @phba: Pointer to HBA context object. 2291 * @pring: Pointer to driver SLI ring object. 2292 * @saveq: Pointer to the iocbq struct representing the sequence starting frame. 2293 * @fch_r_ctl: the r_ctl for the first frame of the sequence. 2294 * @fch_type: the type for the first frame of the sequence. 2295 * 2296 * This function is called with no lock held. This function uses the r_ctl and 2297 * type of the received sequence to find the correct callback function to call 2298 * to process the sequence. 2299 **/ 2300 static int 2301 lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2302 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl, 2303 uint32_t fch_type) 2304 { 2305 int i; 2306 2307 /* unSolicited Responses */ 2308 if (pring->prt[0].profile) { 2309 if (pring->prt[0].lpfc_sli_rcv_unsol_event) 2310 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring, 2311 saveq); 2312 return 1; 2313 } 2314 /* We must search, based on rctl / type 2315 for the right routine */ 2316 for (i = 0; i < pring->num_mask; i++) { 2317 if ((pring->prt[i].rctl == fch_r_ctl) && 2318 (pring->prt[i].type == fch_type)) { 2319 if (pring->prt[i].lpfc_sli_rcv_unsol_event) 2320 (pring->prt[i].lpfc_sli_rcv_unsol_event) 2321 (phba, pring, saveq); 2322 return 1; 2323 } 2324 } 2325 return 0; 2326 } 2327 2328 /** 2329 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler 2330 * @phba: Pointer to HBA context object. 2331 * @pring: Pointer to driver SLI ring object. 2332 * @saveq: Pointer to the unsolicited iocb. 2333 * 2334 * This function is called with no lock held by the ring event handler 2335 * when there is an unsolicited iocb posted to the response ring by the 2336 * firmware. This function gets the buffer associated with the iocbs 2337 * and calls the event handler for the ring. This function handles both 2338 * qring buffers and hbq buffers. 2339 * When the function returns 1 the caller can free the iocb object otherwise 2340 * upper layer functions will free the iocb objects. 2341 **/ 2342 static int 2343 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2344 struct lpfc_iocbq *saveq) 2345 { 2346 IOCB_t * irsp; 2347 WORD5 * w5p; 2348 uint32_t Rctl, Type; 2349 uint32_t match; 2350 struct lpfc_iocbq *iocbq; 2351 struct lpfc_dmabuf *dmzbuf; 2352 2353 match = 0; 2354 irsp = &(saveq->iocb); 2355 2356 if (irsp->ulpCommand == CMD_ASYNC_STATUS) { 2357 if (pring->lpfc_sli_rcv_async_status) 2358 pring->lpfc_sli_rcv_async_status(phba, pring, saveq); 2359 else 2360 lpfc_printf_log(phba, 2361 KERN_WARNING, 2362 LOG_SLI, 2363 "0316 Ring %d handler: unexpected " 2364 "ASYNC_STATUS iocb received evt_code " 2365 "0x%x\n", 2366 pring->ringno, 2367 irsp->un.asyncstat.evt_code); 2368 return 1; 2369 } 2370 2371 if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) && 2372 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) { 2373 if (irsp->ulpBdeCount > 0) { 2374 dmzbuf = lpfc_sli_get_buff(phba, pring, 2375 irsp->un.ulpWord[3]); 2376 lpfc_in_buf_free(phba, dmzbuf); 2377 } 2378 2379 if (irsp->ulpBdeCount > 1) { 2380 dmzbuf = lpfc_sli_get_buff(phba, pring, 2381 irsp->unsli3.sli3Words[3]); 2382 lpfc_in_buf_free(phba, dmzbuf); 2383 } 2384 2385 if (irsp->ulpBdeCount > 2) { 2386 dmzbuf = lpfc_sli_get_buff(phba, pring, 2387 irsp->unsli3.sli3Words[7]); 2388 lpfc_in_buf_free(phba, dmzbuf); 2389 } 2390 2391 return 1; 2392 } 2393 2394 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 2395 if (irsp->ulpBdeCount != 0) { 2396 saveq->context2 = lpfc_sli_get_buff(phba, pring, 2397 irsp->un.ulpWord[3]); 2398 if (!saveq->context2) 2399 lpfc_printf_log(phba, 2400 KERN_ERR, 2401 LOG_SLI, 2402 "0341 Ring %d Cannot find buffer for " 2403 "an unsolicited iocb. tag 0x%x\n", 2404 pring->ringno, 2405 irsp->un.ulpWord[3]); 2406 } 2407 if (irsp->ulpBdeCount == 2) { 2408 saveq->context3 = lpfc_sli_get_buff(phba, pring, 2409 irsp->unsli3.sli3Words[7]); 2410 if (!saveq->context3) 2411 lpfc_printf_log(phba, 2412 KERN_ERR, 2413 LOG_SLI, 2414 "0342 Ring %d Cannot find buffer for an" 2415 " unsolicited iocb. tag 0x%x\n", 2416 pring->ringno, 2417 irsp->unsli3.sli3Words[7]); 2418 } 2419 list_for_each_entry(iocbq, &saveq->list, list) { 2420 irsp = &(iocbq->iocb); 2421 if (irsp->ulpBdeCount != 0) { 2422 iocbq->context2 = lpfc_sli_get_buff(phba, pring, 2423 irsp->un.ulpWord[3]); 2424 if (!iocbq->context2) 2425 lpfc_printf_log(phba, 2426 KERN_ERR, 2427 LOG_SLI, 2428 "0343 Ring %d Cannot find " 2429 "buffer for an unsolicited iocb" 2430 ". tag 0x%x\n", pring->ringno, 2431 irsp->un.ulpWord[3]); 2432 } 2433 if (irsp->ulpBdeCount == 2) { 2434 iocbq->context3 = lpfc_sli_get_buff(phba, pring, 2435 irsp->unsli3.sli3Words[7]); 2436 if (!iocbq->context3) 2437 lpfc_printf_log(phba, 2438 KERN_ERR, 2439 LOG_SLI, 2440 "0344 Ring %d Cannot find " 2441 "buffer for an unsolicited " 2442 "iocb. tag 0x%x\n", 2443 pring->ringno, 2444 irsp->unsli3.sli3Words[7]); 2445 } 2446 } 2447 } 2448 if (irsp->ulpBdeCount != 0 && 2449 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX || 2450 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) { 2451 int found = 0; 2452 2453 /* search continue save q for same XRI */ 2454 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) { 2455 if (iocbq->iocb.unsli3.rcvsli3.ox_id == 2456 saveq->iocb.unsli3.rcvsli3.ox_id) { 2457 list_add_tail(&saveq->list, &iocbq->list); 2458 found = 1; 2459 break; 2460 } 2461 } 2462 if (!found) 2463 list_add_tail(&saveq->clist, 2464 &pring->iocb_continue_saveq); 2465 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) { 2466 list_del_init(&iocbq->clist); 2467 saveq = iocbq; 2468 irsp = &(saveq->iocb); 2469 } else 2470 return 0; 2471 } 2472 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) || 2473 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) || 2474 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) { 2475 Rctl = FC_RCTL_ELS_REQ; 2476 Type = FC_TYPE_ELS; 2477 } else { 2478 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]); 2479 Rctl = w5p->hcsw.Rctl; 2480 Type = w5p->hcsw.Type; 2481 2482 /* Firmware Workaround */ 2483 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) && 2484 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX || 2485 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { 2486 Rctl = FC_RCTL_ELS_REQ; 2487 Type = FC_TYPE_ELS; 2488 w5p->hcsw.Rctl = Rctl; 2489 w5p->hcsw.Type = Type; 2490 } 2491 } 2492 2493 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type)) 2494 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 2495 "0313 Ring %d handler: unexpected Rctl x%x " 2496 "Type x%x received\n", 2497 pring->ringno, Rctl, Type); 2498 2499 return 1; 2500 } 2501 2502 /** 2503 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb 2504 * @phba: Pointer to HBA context object. 2505 * @pring: Pointer to driver SLI ring object. 2506 * @prspiocb: Pointer to response iocb object. 2507 * 2508 * This function looks up the iocb_lookup table to get the command iocb 2509 * corresponding to the given response iocb using the iotag of the 2510 * response iocb. This function is called with the hbalock held. 2511 * This function returns the command iocb object if it finds the command 2512 * iocb else returns NULL. 2513 **/ 2514 static struct lpfc_iocbq * 2515 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba, 2516 struct lpfc_sli_ring *pring, 2517 struct lpfc_iocbq *prspiocb) 2518 { 2519 struct lpfc_iocbq *cmd_iocb = NULL; 2520 uint16_t iotag; 2521 2522 iotag = prspiocb->iocb.ulpIoTag; 2523 2524 if (iotag != 0 && iotag <= phba->sli.last_iotag) { 2525 cmd_iocb = phba->sli.iocbq_lookup[iotag]; 2526 list_del_init(&cmd_iocb->list); 2527 if (cmd_iocb->iocb_flag & LPFC_IO_ON_Q) { 2528 pring->txcmplq_cnt--; 2529 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_Q; 2530 } 2531 return cmd_iocb; 2532 } 2533 2534 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2535 "0317 iotag x%x is out off " 2536 "range: max iotag x%x wd0 x%x\n", 2537 iotag, phba->sli.last_iotag, 2538 *(((uint32_t *) &prspiocb->iocb) + 7)); 2539 return NULL; 2540 } 2541 2542 /** 2543 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag 2544 * @phba: Pointer to HBA context object. 2545 * @pring: Pointer to driver SLI ring object. 2546 * @iotag: IOCB tag. 2547 * 2548 * This function looks up the iocb_lookup table to get the command iocb 2549 * corresponding to the given iotag. This function is called with the 2550 * hbalock held. 2551 * This function returns the command iocb object if it finds the command 2552 * iocb else returns NULL. 2553 **/ 2554 static struct lpfc_iocbq * 2555 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba, 2556 struct lpfc_sli_ring *pring, uint16_t iotag) 2557 { 2558 struct lpfc_iocbq *cmd_iocb; 2559 2560 if (iotag != 0 && iotag <= phba->sli.last_iotag) { 2561 cmd_iocb = phba->sli.iocbq_lookup[iotag]; 2562 list_del_init(&cmd_iocb->list); 2563 if (cmd_iocb->iocb_flag & LPFC_IO_ON_Q) { 2564 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_Q; 2565 pring->txcmplq_cnt--; 2566 } 2567 return cmd_iocb; 2568 } 2569 2570 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2571 "0372 iotag x%x is out off range: max iotag (x%x)\n", 2572 iotag, phba->sli.last_iotag); 2573 return NULL; 2574 } 2575 2576 /** 2577 * lpfc_sli_process_sol_iocb - process solicited iocb completion 2578 * @phba: Pointer to HBA context object. 2579 * @pring: Pointer to driver SLI ring object. 2580 * @saveq: Pointer to the response iocb to be processed. 2581 * 2582 * This function is called by the ring event handler for non-fcp 2583 * rings when there is a new response iocb in the response ring. 2584 * The caller is not required to hold any locks. This function 2585 * gets the command iocb associated with the response iocb and 2586 * calls the completion handler for the command iocb. If there 2587 * is no completion handler, the function will free the resources 2588 * associated with command iocb. If the response iocb is for 2589 * an already aborted command iocb, the status of the completion 2590 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED. 2591 * This function always returns 1. 2592 **/ 2593 static int 2594 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2595 struct lpfc_iocbq *saveq) 2596 { 2597 struct lpfc_iocbq *cmdiocbp; 2598 int rc = 1; 2599 unsigned long iflag; 2600 2601 /* Based on the iotag field, get the cmd IOCB from the txcmplq */ 2602 spin_lock_irqsave(&phba->hbalock, iflag); 2603 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq); 2604 spin_unlock_irqrestore(&phba->hbalock, iflag); 2605 2606 if (cmdiocbp) { 2607 if (cmdiocbp->iocb_cmpl) { 2608 /* 2609 * If an ELS command failed send an event to mgmt 2610 * application. 2611 */ 2612 if (saveq->iocb.ulpStatus && 2613 (pring->ringno == LPFC_ELS_RING) && 2614 (cmdiocbp->iocb.ulpCommand == 2615 CMD_ELS_REQUEST64_CR)) 2616 lpfc_send_els_failure_event(phba, 2617 cmdiocbp, saveq); 2618 2619 /* 2620 * Post all ELS completions to the worker thread. 2621 * All other are passed to the completion callback. 2622 */ 2623 if (pring->ringno == LPFC_ELS_RING) { 2624 if ((phba->sli_rev < LPFC_SLI_REV4) && 2625 (cmdiocbp->iocb_flag & 2626 LPFC_DRIVER_ABORTED)) { 2627 spin_lock_irqsave(&phba->hbalock, 2628 iflag); 2629 cmdiocbp->iocb_flag &= 2630 ~LPFC_DRIVER_ABORTED; 2631 spin_unlock_irqrestore(&phba->hbalock, 2632 iflag); 2633 saveq->iocb.ulpStatus = 2634 IOSTAT_LOCAL_REJECT; 2635 saveq->iocb.un.ulpWord[4] = 2636 IOERR_SLI_ABORTED; 2637 2638 /* Firmware could still be in progress 2639 * of DMAing payload, so don't free data 2640 * buffer till after a hbeat. 2641 */ 2642 spin_lock_irqsave(&phba->hbalock, 2643 iflag); 2644 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE; 2645 spin_unlock_irqrestore(&phba->hbalock, 2646 iflag); 2647 } 2648 if (phba->sli_rev == LPFC_SLI_REV4) { 2649 if (saveq->iocb_flag & 2650 LPFC_EXCHANGE_BUSY) { 2651 /* Set cmdiocb flag for the 2652 * exchange busy so sgl (xri) 2653 * will not be released until 2654 * the abort xri is received 2655 * from hba. 2656 */ 2657 spin_lock_irqsave( 2658 &phba->hbalock, iflag); 2659 cmdiocbp->iocb_flag |= 2660 LPFC_EXCHANGE_BUSY; 2661 spin_unlock_irqrestore( 2662 &phba->hbalock, iflag); 2663 } 2664 if (cmdiocbp->iocb_flag & 2665 LPFC_DRIVER_ABORTED) { 2666 /* 2667 * Clear LPFC_DRIVER_ABORTED 2668 * bit in case it was driver 2669 * initiated abort. 2670 */ 2671 spin_lock_irqsave( 2672 &phba->hbalock, iflag); 2673 cmdiocbp->iocb_flag &= 2674 ~LPFC_DRIVER_ABORTED; 2675 spin_unlock_irqrestore( 2676 &phba->hbalock, iflag); 2677 cmdiocbp->iocb.ulpStatus = 2678 IOSTAT_LOCAL_REJECT; 2679 cmdiocbp->iocb.un.ulpWord[4] = 2680 IOERR_ABORT_REQUESTED; 2681 /* 2682 * For SLI4, irsiocb contains 2683 * NO_XRI in sli_xritag, it 2684 * shall not affect releasing 2685 * sgl (xri) process. 2686 */ 2687 saveq->iocb.ulpStatus = 2688 IOSTAT_LOCAL_REJECT; 2689 saveq->iocb.un.ulpWord[4] = 2690 IOERR_SLI_ABORTED; 2691 spin_lock_irqsave( 2692 &phba->hbalock, iflag); 2693 saveq->iocb_flag |= 2694 LPFC_DELAY_MEM_FREE; 2695 spin_unlock_irqrestore( 2696 &phba->hbalock, iflag); 2697 } 2698 } 2699 } 2700 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq); 2701 } else 2702 lpfc_sli_release_iocbq(phba, cmdiocbp); 2703 } else { 2704 /* 2705 * Unknown initiating command based on the response iotag. 2706 * This could be the case on the ELS ring because of 2707 * lpfc_els_abort(). 2708 */ 2709 if (pring->ringno != LPFC_ELS_RING) { 2710 /* 2711 * Ring <ringno> handler: unexpected completion IoTag 2712 * <IoTag> 2713 */ 2714 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 2715 "0322 Ring %d handler: " 2716 "unexpected completion IoTag x%x " 2717 "Data: x%x x%x x%x x%x\n", 2718 pring->ringno, 2719 saveq->iocb.ulpIoTag, 2720 saveq->iocb.ulpStatus, 2721 saveq->iocb.un.ulpWord[4], 2722 saveq->iocb.ulpCommand, 2723 saveq->iocb.ulpContext); 2724 } 2725 } 2726 2727 return rc; 2728 } 2729 2730 /** 2731 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler 2732 * @phba: Pointer to HBA context object. 2733 * @pring: Pointer to driver SLI ring object. 2734 * 2735 * This function is called from the iocb ring event handlers when 2736 * put pointer is ahead of the get pointer for a ring. This function signal 2737 * an error attention condition to the worker thread and the worker 2738 * thread will transition the HBA to offline state. 2739 **/ 2740 static void 2741 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 2742 { 2743 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; 2744 /* 2745 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than 2746 * rsp ring <portRspMax> 2747 */ 2748 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2749 "0312 Ring %d handler: portRspPut %d " 2750 "is bigger than rsp ring %d\n", 2751 pring->ringno, le32_to_cpu(pgp->rspPutInx), 2752 pring->numRiocb); 2753 2754 phba->link_state = LPFC_HBA_ERROR; 2755 2756 /* 2757 * All error attention handlers are posted to 2758 * worker thread 2759 */ 2760 phba->work_ha |= HA_ERATT; 2761 phba->work_hs = HS_FFER3; 2762 2763 lpfc_worker_wake_up(phba); 2764 2765 return; 2766 } 2767 2768 /** 2769 * lpfc_poll_eratt - Error attention polling timer timeout handler 2770 * @ptr: Pointer to address of HBA context object. 2771 * 2772 * This function is invoked by the Error Attention polling timer when the 2773 * timer times out. It will check the SLI Error Attention register for 2774 * possible attention events. If so, it will post an Error Attention event 2775 * and wake up worker thread to process it. Otherwise, it will set up the 2776 * Error Attention polling timer for the next poll. 2777 **/ 2778 void lpfc_poll_eratt(unsigned long ptr) 2779 { 2780 struct lpfc_hba *phba; 2781 uint32_t eratt = 0; 2782 2783 phba = (struct lpfc_hba *)ptr; 2784 2785 /* Check chip HA register for error event */ 2786 eratt = lpfc_sli_check_eratt(phba); 2787 2788 if (eratt) 2789 /* Tell the worker thread there is work to do */ 2790 lpfc_worker_wake_up(phba); 2791 else 2792 /* Restart the timer for next eratt poll */ 2793 mod_timer(&phba->eratt_poll, jiffies + 2794 HZ * LPFC_ERATT_POLL_INTERVAL); 2795 return; 2796 } 2797 2798 2799 /** 2800 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring 2801 * @phba: Pointer to HBA context object. 2802 * @pring: Pointer to driver SLI ring object. 2803 * @mask: Host attention register mask for this ring. 2804 * 2805 * This function is called from the interrupt context when there is a ring 2806 * event for the fcp ring. The caller does not hold any lock. 2807 * The function processes each response iocb in the response ring until it 2808 * finds an iocb with LE bit set and chains all the iocbs up to the iocb with 2809 * LE bit set. The function will call the completion handler of the command iocb 2810 * if the response iocb indicates a completion for a command iocb or it is 2811 * an abort completion. The function will call lpfc_sli_process_unsol_iocb 2812 * function if this is an unsolicited iocb. 2813 * This routine presumes LPFC_FCP_RING handling and doesn't bother 2814 * to check it explicitly. 2815 */ 2816 int 2817 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, 2818 struct lpfc_sli_ring *pring, uint32_t mask) 2819 { 2820 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; 2821 IOCB_t *irsp = NULL; 2822 IOCB_t *entry = NULL; 2823 struct lpfc_iocbq *cmdiocbq = NULL; 2824 struct lpfc_iocbq rspiocbq; 2825 uint32_t status; 2826 uint32_t portRspPut, portRspMax; 2827 int rc = 1; 2828 lpfc_iocb_type type; 2829 unsigned long iflag; 2830 uint32_t rsp_cmpl = 0; 2831 2832 spin_lock_irqsave(&phba->hbalock, iflag); 2833 pring->stats.iocb_event++; 2834 2835 /* 2836 * The next available response entry should never exceed the maximum 2837 * entries. If it does, treat it as an adapter hardware error. 2838 */ 2839 portRspMax = pring->numRiocb; 2840 portRspPut = le32_to_cpu(pgp->rspPutInx); 2841 if (unlikely(portRspPut >= portRspMax)) { 2842 lpfc_sli_rsp_pointers_error(phba, pring); 2843 spin_unlock_irqrestore(&phba->hbalock, iflag); 2844 return 1; 2845 } 2846 if (phba->fcp_ring_in_use) { 2847 spin_unlock_irqrestore(&phba->hbalock, iflag); 2848 return 1; 2849 } else 2850 phba->fcp_ring_in_use = 1; 2851 2852 rmb(); 2853 while (pring->rspidx != portRspPut) { 2854 /* 2855 * Fetch an entry off the ring and copy it into a local data 2856 * structure. The copy involves a byte-swap since the 2857 * network byte order and pci byte orders are different. 2858 */ 2859 entry = lpfc_resp_iocb(phba, pring); 2860 phba->last_completion_time = jiffies; 2861 2862 if (++pring->rspidx >= portRspMax) 2863 pring->rspidx = 0; 2864 2865 lpfc_sli_pcimem_bcopy((uint32_t *) entry, 2866 (uint32_t *) &rspiocbq.iocb, 2867 phba->iocb_rsp_size); 2868 INIT_LIST_HEAD(&(rspiocbq.list)); 2869 irsp = &rspiocbq.iocb; 2870 2871 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK); 2872 pring->stats.iocb_rsp++; 2873 rsp_cmpl++; 2874 2875 if (unlikely(irsp->ulpStatus)) { 2876 /* 2877 * If resource errors reported from HBA, reduce 2878 * queuedepths of the SCSI device. 2879 */ 2880 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 2881 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) { 2882 spin_unlock_irqrestore(&phba->hbalock, iflag); 2883 phba->lpfc_rampdown_queue_depth(phba); 2884 spin_lock_irqsave(&phba->hbalock, iflag); 2885 } 2886 2887 /* Rsp ring <ringno> error: IOCB */ 2888 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 2889 "0336 Rsp Ring %d error: IOCB Data: " 2890 "x%x x%x x%x x%x x%x x%x x%x x%x\n", 2891 pring->ringno, 2892 irsp->un.ulpWord[0], 2893 irsp->un.ulpWord[1], 2894 irsp->un.ulpWord[2], 2895 irsp->un.ulpWord[3], 2896 irsp->un.ulpWord[4], 2897 irsp->un.ulpWord[5], 2898 *(uint32_t *)&irsp->un1, 2899 *((uint32_t *)&irsp->un1 + 1)); 2900 } 2901 2902 switch (type) { 2903 case LPFC_ABORT_IOCB: 2904 case LPFC_SOL_IOCB: 2905 /* 2906 * Idle exchange closed via ABTS from port. No iocb 2907 * resources need to be recovered. 2908 */ 2909 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) { 2910 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 2911 "0333 IOCB cmd 0x%x" 2912 " processed. Skipping" 2913 " completion\n", 2914 irsp->ulpCommand); 2915 break; 2916 } 2917 2918 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, 2919 &rspiocbq); 2920 if (unlikely(!cmdiocbq)) 2921 break; 2922 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) 2923 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED; 2924 if (cmdiocbq->iocb_cmpl) { 2925 spin_unlock_irqrestore(&phba->hbalock, iflag); 2926 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, 2927 &rspiocbq); 2928 spin_lock_irqsave(&phba->hbalock, iflag); 2929 } 2930 break; 2931 case LPFC_UNSOL_IOCB: 2932 spin_unlock_irqrestore(&phba->hbalock, iflag); 2933 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq); 2934 spin_lock_irqsave(&phba->hbalock, iflag); 2935 break; 2936 default: 2937 if (irsp->ulpCommand == CMD_ADAPTER_MSG) { 2938 char adaptermsg[LPFC_MAX_ADPTMSG]; 2939 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); 2940 memcpy(&adaptermsg[0], (uint8_t *) irsp, 2941 MAX_MSG_DATA); 2942 dev_warn(&((phba->pcidev)->dev), 2943 "lpfc%d: %s\n", 2944 phba->brd_no, adaptermsg); 2945 } else { 2946 /* Unknown IOCB command */ 2947 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2948 "0334 Unknown IOCB command " 2949 "Data: x%x, x%x x%x x%x x%x\n", 2950 type, irsp->ulpCommand, 2951 irsp->ulpStatus, 2952 irsp->ulpIoTag, 2953 irsp->ulpContext); 2954 } 2955 break; 2956 } 2957 2958 /* 2959 * The response IOCB has been processed. Update the ring 2960 * pointer in SLIM. If the port response put pointer has not 2961 * been updated, sync the pgp->rspPutInx and fetch the new port 2962 * response put pointer. 2963 */ 2964 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx); 2965 2966 if (pring->rspidx == portRspPut) 2967 portRspPut = le32_to_cpu(pgp->rspPutInx); 2968 } 2969 2970 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) { 2971 pring->stats.iocb_rsp_full++; 2972 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4)); 2973 writel(status, phba->CAregaddr); 2974 readl(phba->CAregaddr); 2975 } 2976 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) { 2977 pring->flag &= ~LPFC_CALL_RING_AVAILABLE; 2978 pring->stats.iocb_cmd_empty++; 2979 2980 /* Force update of the local copy of cmdGetInx */ 2981 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx); 2982 lpfc_sli_resume_iocb(phba, pring); 2983 2984 if ((pring->lpfc_sli_cmd_available)) 2985 (pring->lpfc_sli_cmd_available) (phba, pring); 2986 2987 } 2988 2989 phba->fcp_ring_in_use = 0; 2990 spin_unlock_irqrestore(&phba->hbalock, iflag); 2991 return rc; 2992 } 2993 2994 /** 2995 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb 2996 * @phba: Pointer to HBA context object. 2997 * @pring: Pointer to driver SLI ring object. 2998 * @rspiocbp: Pointer to driver response IOCB object. 2999 * 3000 * This function is called from the worker thread when there is a slow-path 3001 * response IOCB to process. This function chains all the response iocbs until 3002 * seeing the iocb with the LE bit set. The function will call 3003 * lpfc_sli_process_sol_iocb function if the response iocb indicates a 3004 * completion of a command iocb. The function will call the 3005 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb. 3006 * The function frees the resources or calls the completion handler if this 3007 * iocb is an abort completion. The function returns NULL when the response 3008 * iocb has the LE bit set and all the chained iocbs are processed, otherwise 3009 * this function shall chain the iocb on to the iocb_continueq and return the 3010 * response iocb passed in. 3011 **/ 3012 static struct lpfc_iocbq * 3013 lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 3014 struct lpfc_iocbq *rspiocbp) 3015 { 3016 struct lpfc_iocbq *saveq; 3017 struct lpfc_iocbq *cmdiocbp; 3018 struct lpfc_iocbq *next_iocb; 3019 IOCB_t *irsp = NULL; 3020 uint32_t free_saveq; 3021 uint8_t iocb_cmd_type; 3022 lpfc_iocb_type type; 3023 unsigned long iflag; 3024 int rc; 3025 3026 spin_lock_irqsave(&phba->hbalock, iflag); 3027 /* First add the response iocb to the countinueq list */ 3028 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq)); 3029 pring->iocb_continueq_cnt++; 3030 3031 /* Now, determine whether the list is completed for processing */ 3032 irsp = &rspiocbp->iocb; 3033 if (irsp->ulpLe) { 3034 /* 3035 * By default, the driver expects to free all resources 3036 * associated with this iocb completion. 3037 */ 3038 free_saveq = 1; 3039 saveq = list_get_first(&pring->iocb_continueq, 3040 struct lpfc_iocbq, list); 3041 irsp = &(saveq->iocb); 3042 list_del_init(&pring->iocb_continueq); 3043 pring->iocb_continueq_cnt = 0; 3044 3045 pring->stats.iocb_rsp++; 3046 3047 /* 3048 * If resource errors reported from HBA, reduce 3049 * queuedepths of the SCSI device. 3050 */ 3051 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 3052 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) { 3053 spin_unlock_irqrestore(&phba->hbalock, iflag); 3054 phba->lpfc_rampdown_queue_depth(phba); 3055 spin_lock_irqsave(&phba->hbalock, iflag); 3056 } 3057 3058 if (irsp->ulpStatus) { 3059 /* Rsp ring <ringno> error: IOCB */ 3060 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 3061 "0328 Rsp Ring %d error: " 3062 "IOCB Data: " 3063 "x%x x%x x%x x%x " 3064 "x%x x%x x%x x%x " 3065 "x%x x%x x%x x%x " 3066 "x%x x%x x%x x%x\n", 3067 pring->ringno, 3068 irsp->un.ulpWord[0], 3069 irsp->un.ulpWord[1], 3070 irsp->un.ulpWord[2], 3071 irsp->un.ulpWord[3], 3072 irsp->un.ulpWord[4], 3073 irsp->un.ulpWord[5], 3074 *(((uint32_t *) irsp) + 6), 3075 *(((uint32_t *) irsp) + 7), 3076 *(((uint32_t *) irsp) + 8), 3077 *(((uint32_t *) irsp) + 9), 3078 *(((uint32_t *) irsp) + 10), 3079 *(((uint32_t *) irsp) + 11), 3080 *(((uint32_t *) irsp) + 12), 3081 *(((uint32_t *) irsp) + 13), 3082 *(((uint32_t *) irsp) + 14), 3083 *(((uint32_t *) irsp) + 15)); 3084 } 3085 3086 /* 3087 * Fetch the IOCB command type and call the correct completion 3088 * routine. Solicited and Unsolicited IOCBs on the ELS ring 3089 * get freed back to the lpfc_iocb_list by the discovery 3090 * kernel thread. 3091 */ 3092 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK; 3093 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type); 3094 switch (type) { 3095 case LPFC_SOL_IOCB: 3096 spin_unlock_irqrestore(&phba->hbalock, iflag); 3097 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq); 3098 spin_lock_irqsave(&phba->hbalock, iflag); 3099 break; 3100 3101 case LPFC_UNSOL_IOCB: 3102 spin_unlock_irqrestore(&phba->hbalock, iflag); 3103 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq); 3104 spin_lock_irqsave(&phba->hbalock, iflag); 3105 if (!rc) 3106 free_saveq = 0; 3107 break; 3108 3109 case LPFC_ABORT_IOCB: 3110 cmdiocbp = NULL; 3111 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX) 3112 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, 3113 saveq); 3114 if (cmdiocbp) { 3115 /* Call the specified completion routine */ 3116 if (cmdiocbp->iocb_cmpl) { 3117 spin_unlock_irqrestore(&phba->hbalock, 3118 iflag); 3119 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp, 3120 saveq); 3121 spin_lock_irqsave(&phba->hbalock, 3122 iflag); 3123 } else 3124 __lpfc_sli_release_iocbq(phba, 3125 cmdiocbp); 3126 } 3127 break; 3128 3129 case LPFC_UNKNOWN_IOCB: 3130 if (irsp->ulpCommand == CMD_ADAPTER_MSG) { 3131 char adaptermsg[LPFC_MAX_ADPTMSG]; 3132 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); 3133 memcpy(&adaptermsg[0], (uint8_t *)irsp, 3134 MAX_MSG_DATA); 3135 dev_warn(&((phba->pcidev)->dev), 3136 "lpfc%d: %s\n", 3137 phba->brd_no, adaptermsg); 3138 } else { 3139 /* Unknown IOCB command */ 3140 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3141 "0335 Unknown IOCB " 3142 "command Data: x%x " 3143 "x%x x%x x%x\n", 3144 irsp->ulpCommand, 3145 irsp->ulpStatus, 3146 irsp->ulpIoTag, 3147 irsp->ulpContext); 3148 } 3149 break; 3150 } 3151 3152 if (free_saveq) { 3153 list_for_each_entry_safe(rspiocbp, next_iocb, 3154 &saveq->list, list) { 3155 list_del(&rspiocbp->list); 3156 __lpfc_sli_release_iocbq(phba, rspiocbp); 3157 } 3158 __lpfc_sli_release_iocbq(phba, saveq); 3159 } 3160 rspiocbp = NULL; 3161 } 3162 spin_unlock_irqrestore(&phba->hbalock, iflag); 3163 return rspiocbp; 3164 } 3165 3166 /** 3167 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs 3168 * @phba: Pointer to HBA context object. 3169 * @pring: Pointer to driver SLI ring object. 3170 * @mask: Host attention register mask for this ring. 3171 * 3172 * This routine wraps the actual slow_ring event process routine from the 3173 * API jump table function pointer from the lpfc_hba struct. 3174 **/ 3175 void 3176 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba, 3177 struct lpfc_sli_ring *pring, uint32_t mask) 3178 { 3179 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask); 3180 } 3181 3182 /** 3183 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings 3184 * @phba: Pointer to HBA context object. 3185 * @pring: Pointer to driver SLI ring object. 3186 * @mask: Host attention register mask for this ring. 3187 * 3188 * This function is called from the worker thread when there is a ring event 3189 * for non-fcp rings. The caller does not hold any lock. The function will 3190 * remove each response iocb in the response ring and calls the handle 3191 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it. 3192 **/ 3193 static void 3194 lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba, 3195 struct lpfc_sli_ring *pring, uint32_t mask) 3196 { 3197 struct lpfc_pgp *pgp; 3198 IOCB_t *entry; 3199 IOCB_t *irsp = NULL; 3200 struct lpfc_iocbq *rspiocbp = NULL; 3201 uint32_t portRspPut, portRspMax; 3202 unsigned long iflag; 3203 uint32_t status; 3204 3205 pgp = &phba->port_gp[pring->ringno]; 3206 spin_lock_irqsave(&phba->hbalock, iflag); 3207 pring->stats.iocb_event++; 3208 3209 /* 3210 * The next available response entry should never exceed the maximum 3211 * entries. If it does, treat it as an adapter hardware error. 3212 */ 3213 portRspMax = pring->numRiocb; 3214 portRspPut = le32_to_cpu(pgp->rspPutInx); 3215 if (portRspPut >= portRspMax) { 3216 /* 3217 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than 3218 * rsp ring <portRspMax> 3219 */ 3220 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3221 "0303 Ring %d handler: portRspPut %d " 3222 "is bigger than rsp ring %d\n", 3223 pring->ringno, portRspPut, portRspMax); 3224 3225 phba->link_state = LPFC_HBA_ERROR; 3226 spin_unlock_irqrestore(&phba->hbalock, iflag); 3227 3228 phba->work_hs = HS_FFER3; 3229 lpfc_handle_eratt(phba); 3230 3231 return; 3232 } 3233 3234 rmb(); 3235 while (pring->rspidx != portRspPut) { 3236 /* 3237 * Build a completion list and call the appropriate handler. 3238 * The process is to get the next available response iocb, get 3239 * a free iocb from the list, copy the response data into the 3240 * free iocb, insert to the continuation list, and update the 3241 * next response index to slim. This process makes response 3242 * iocb's in the ring available to DMA as fast as possible but 3243 * pays a penalty for a copy operation. Since the iocb is 3244 * only 32 bytes, this penalty is considered small relative to 3245 * the PCI reads for register values and a slim write. When 3246 * the ulpLe field is set, the entire Command has been 3247 * received. 3248 */ 3249 entry = lpfc_resp_iocb(phba, pring); 3250 3251 phba->last_completion_time = jiffies; 3252 rspiocbp = __lpfc_sli_get_iocbq(phba); 3253 if (rspiocbp == NULL) { 3254 printk(KERN_ERR "%s: out of buffers! Failing " 3255 "completion.\n", __func__); 3256 break; 3257 } 3258 3259 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb, 3260 phba->iocb_rsp_size); 3261 irsp = &rspiocbp->iocb; 3262 3263 if (++pring->rspidx >= portRspMax) 3264 pring->rspidx = 0; 3265 3266 if (pring->ringno == LPFC_ELS_RING) { 3267 lpfc_debugfs_slow_ring_trc(phba, 3268 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x", 3269 *(((uint32_t *) irsp) + 4), 3270 *(((uint32_t *) irsp) + 6), 3271 *(((uint32_t *) irsp) + 7)); 3272 } 3273 3274 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx); 3275 3276 spin_unlock_irqrestore(&phba->hbalock, iflag); 3277 /* Handle the response IOCB */ 3278 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp); 3279 spin_lock_irqsave(&phba->hbalock, iflag); 3280 3281 /* 3282 * If the port response put pointer has not been updated, sync 3283 * the pgp->rspPutInx in the MAILBOX_tand fetch the new port 3284 * response put pointer. 3285 */ 3286 if (pring->rspidx == portRspPut) { 3287 portRspPut = le32_to_cpu(pgp->rspPutInx); 3288 } 3289 } /* while (pring->rspidx != portRspPut) */ 3290 3291 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) { 3292 /* At least one response entry has been freed */ 3293 pring->stats.iocb_rsp_full++; 3294 /* SET RxRE_RSP in Chip Att register */ 3295 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4)); 3296 writel(status, phba->CAregaddr); 3297 readl(phba->CAregaddr); /* flush */ 3298 } 3299 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) { 3300 pring->flag &= ~LPFC_CALL_RING_AVAILABLE; 3301 pring->stats.iocb_cmd_empty++; 3302 3303 /* Force update of the local copy of cmdGetInx */ 3304 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx); 3305 lpfc_sli_resume_iocb(phba, pring); 3306 3307 if ((pring->lpfc_sli_cmd_available)) 3308 (pring->lpfc_sli_cmd_available) (phba, pring); 3309 3310 } 3311 3312 spin_unlock_irqrestore(&phba->hbalock, iflag); 3313 return; 3314 } 3315 3316 /** 3317 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events 3318 * @phba: Pointer to HBA context object. 3319 * @pring: Pointer to driver SLI ring object. 3320 * @mask: Host attention register mask for this ring. 3321 * 3322 * This function is called from the worker thread when there is a pending 3323 * ELS response iocb on the driver internal slow-path response iocb worker 3324 * queue. The caller does not hold any lock. The function will remove each 3325 * response iocb from the response worker queue and calls the handle 3326 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it. 3327 **/ 3328 static void 3329 lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba, 3330 struct lpfc_sli_ring *pring, uint32_t mask) 3331 { 3332 struct lpfc_iocbq *irspiocbq; 3333 struct hbq_dmabuf *dmabuf; 3334 struct lpfc_cq_event *cq_event; 3335 unsigned long iflag; 3336 3337 spin_lock_irqsave(&phba->hbalock, iflag); 3338 phba->hba_flag &= ~HBA_SP_QUEUE_EVT; 3339 spin_unlock_irqrestore(&phba->hbalock, iflag); 3340 while (!list_empty(&phba->sli4_hba.sp_queue_event)) { 3341 /* Get the response iocb from the head of work queue */ 3342 spin_lock_irqsave(&phba->hbalock, iflag); 3343 list_remove_head(&phba->sli4_hba.sp_queue_event, 3344 cq_event, struct lpfc_cq_event, list); 3345 spin_unlock_irqrestore(&phba->hbalock, iflag); 3346 3347 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) { 3348 case CQE_CODE_COMPL_WQE: 3349 irspiocbq = container_of(cq_event, struct lpfc_iocbq, 3350 cq_event); 3351 /* Translate ELS WCQE to response IOCBQ */ 3352 irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba, 3353 irspiocbq); 3354 if (irspiocbq) 3355 lpfc_sli_sp_handle_rspiocb(phba, pring, 3356 irspiocbq); 3357 break; 3358 case CQE_CODE_RECEIVE: 3359 case CQE_CODE_RECEIVE_V1: 3360 dmabuf = container_of(cq_event, struct hbq_dmabuf, 3361 cq_event); 3362 lpfc_sli4_handle_received_buffer(phba, dmabuf); 3363 break; 3364 default: 3365 break; 3366 } 3367 } 3368 } 3369 3370 /** 3371 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring 3372 * @phba: Pointer to HBA context object. 3373 * @pring: Pointer to driver SLI ring object. 3374 * 3375 * This function aborts all iocbs in the given ring and frees all the iocb 3376 * objects in txq. This function issues an abort iocb for all the iocb commands 3377 * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before 3378 * the return of this function. The caller is not required to hold any locks. 3379 **/ 3380 void 3381 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 3382 { 3383 LIST_HEAD(completions); 3384 struct lpfc_iocbq *iocb, *next_iocb; 3385 3386 if (pring->ringno == LPFC_ELS_RING) { 3387 lpfc_fabric_abort_hba(phba); 3388 } 3389 3390 /* Error everything on txq and txcmplq 3391 * First do the txq. 3392 */ 3393 spin_lock_irq(&phba->hbalock); 3394 list_splice_init(&pring->txq, &completions); 3395 pring->txq_cnt = 0; 3396 3397 /* Next issue ABTS for everything on the txcmplq */ 3398 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) 3399 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 3400 3401 spin_unlock_irq(&phba->hbalock); 3402 3403 /* Cancel all the IOCBs from the completions list */ 3404 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 3405 IOERR_SLI_ABORTED); 3406 } 3407 3408 /** 3409 * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring 3410 * @phba: Pointer to HBA context object. 3411 * 3412 * This function flushes all iocbs in the fcp ring and frees all the iocb 3413 * objects in txq and txcmplq. This function will not issue abort iocbs 3414 * for all the iocb commands in txcmplq, they will just be returned with 3415 * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI 3416 * slot has been permanently disabled. 3417 **/ 3418 void 3419 lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba) 3420 { 3421 LIST_HEAD(txq); 3422 LIST_HEAD(txcmplq); 3423 struct lpfc_sli *psli = &phba->sli; 3424 struct lpfc_sli_ring *pring; 3425 3426 /* Currently, only one fcp ring */ 3427 pring = &psli->ring[psli->fcp_ring]; 3428 3429 spin_lock_irq(&phba->hbalock); 3430 /* Retrieve everything on txq */ 3431 list_splice_init(&pring->txq, &txq); 3432 pring->txq_cnt = 0; 3433 3434 /* Retrieve everything on the txcmplq */ 3435 list_splice_init(&pring->txcmplq, &txcmplq); 3436 pring->txcmplq_cnt = 0; 3437 spin_unlock_irq(&phba->hbalock); 3438 3439 /* Flush the txq */ 3440 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT, 3441 IOERR_SLI_DOWN); 3442 3443 /* Flush the txcmpq */ 3444 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT, 3445 IOERR_SLI_DOWN); 3446 } 3447 3448 /** 3449 * lpfc_sli_brdready_s3 - Check for sli3 host ready status 3450 * @phba: Pointer to HBA context object. 3451 * @mask: Bit mask to be checked. 3452 * 3453 * This function reads the host status register and compares 3454 * with the provided bit mask to check if HBA completed 3455 * the restart. This function will wait in a loop for the 3456 * HBA to complete restart. If the HBA does not restart within 3457 * 15 iterations, the function will reset the HBA again. The 3458 * function returns 1 when HBA fail to restart otherwise returns 3459 * zero. 3460 **/ 3461 static int 3462 lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask) 3463 { 3464 uint32_t status; 3465 int i = 0; 3466 int retval = 0; 3467 3468 /* Read the HBA Host Status Register */ 3469 if (lpfc_readl(phba->HSregaddr, &status)) 3470 return 1; 3471 3472 /* 3473 * Check status register every 100ms for 5 retries, then every 3474 * 500ms for 5, then every 2.5 sec for 5, then reset board and 3475 * every 2.5 sec for 4. 3476 * Break our of the loop if errors occurred during init. 3477 */ 3478 while (((status & mask) != mask) && 3479 !(status & HS_FFERM) && 3480 i++ < 20) { 3481 3482 if (i <= 5) 3483 msleep(10); 3484 else if (i <= 10) 3485 msleep(500); 3486 else 3487 msleep(2500); 3488 3489 if (i == 15) { 3490 /* Do post */ 3491 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 3492 lpfc_sli_brdrestart(phba); 3493 } 3494 /* Read the HBA Host Status Register */ 3495 if (lpfc_readl(phba->HSregaddr, &status)) { 3496 retval = 1; 3497 break; 3498 } 3499 } 3500 3501 /* Check to see if any errors occurred during init */ 3502 if ((status & HS_FFERM) || (i >= 20)) { 3503 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3504 "2751 Adapter failed to restart, " 3505 "status reg x%x, FW Data: A8 x%x AC x%x\n", 3506 status, 3507 readl(phba->MBslimaddr + 0xa8), 3508 readl(phba->MBslimaddr + 0xac)); 3509 phba->link_state = LPFC_HBA_ERROR; 3510 retval = 1; 3511 } 3512 3513 return retval; 3514 } 3515 3516 /** 3517 * lpfc_sli_brdready_s4 - Check for sli4 host ready status 3518 * @phba: Pointer to HBA context object. 3519 * @mask: Bit mask to be checked. 3520 * 3521 * This function checks the host status register to check if HBA is 3522 * ready. This function will wait in a loop for the HBA to be ready 3523 * If the HBA is not ready , the function will will reset the HBA PCI 3524 * function again. The function returns 1 when HBA fail to be ready 3525 * otherwise returns zero. 3526 **/ 3527 static int 3528 lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask) 3529 { 3530 uint32_t status; 3531 int retval = 0; 3532 3533 /* Read the HBA Host Status Register */ 3534 status = lpfc_sli4_post_status_check(phba); 3535 3536 if (status) { 3537 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 3538 lpfc_sli_brdrestart(phba); 3539 status = lpfc_sli4_post_status_check(phba); 3540 } 3541 3542 /* Check to see if any errors occurred during init */ 3543 if (status) { 3544 phba->link_state = LPFC_HBA_ERROR; 3545 retval = 1; 3546 } else 3547 phba->sli4_hba.intr_enable = 0; 3548 3549 return retval; 3550 } 3551 3552 /** 3553 * lpfc_sli_brdready - Wrapper func for checking the hba readyness 3554 * @phba: Pointer to HBA context object. 3555 * @mask: Bit mask to be checked. 3556 * 3557 * This routine wraps the actual SLI3 or SLI4 hba readyness check routine 3558 * from the API jump table function pointer from the lpfc_hba struct. 3559 **/ 3560 int 3561 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask) 3562 { 3563 return phba->lpfc_sli_brdready(phba, mask); 3564 } 3565 3566 #define BARRIER_TEST_PATTERN (0xdeadbeef) 3567 3568 /** 3569 * lpfc_reset_barrier - Make HBA ready for HBA reset 3570 * @phba: Pointer to HBA context object. 3571 * 3572 * This function is called before resetting an HBA. This 3573 * function requests HBA to quiesce DMAs before a reset. 3574 **/ 3575 void lpfc_reset_barrier(struct lpfc_hba *phba) 3576 { 3577 uint32_t __iomem *resp_buf; 3578 uint32_t __iomem *mbox_buf; 3579 volatile uint32_t mbox; 3580 uint32_t hc_copy, ha_copy, resp_data; 3581 int i; 3582 uint8_t hdrtype; 3583 3584 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype); 3585 if (hdrtype != 0x80 || 3586 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID && 3587 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID)) 3588 return; 3589 3590 /* 3591 * Tell the other part of the chip to suspend temporarily all 3592 * its DMA activity. 3593 */ 3594 resp_buf = phba->MBslimaddr; 3595 3596 /* Disable the error attention */ 3597 if (lpfc_readl(phba->HCregaddr, &hc_copy)) 3598 return; 3599 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr); 3600 readl(phba->HCregaddr); /* flush */ 3601 phba->link_flag |= LS_IGNORE_ERATT; 3602 3603 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 3604 return; 3605 if (ha_copy & HA_ERATT) { 3606 /* Clear Chip error bit */ 3607 writel(HA_ERATT, phba->HAregaddr); 3608 phba->pport->stopped = 1; 3609 } 3610 3611 mbox = 0; 3612 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD; 3613 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP; 3614 3615 writel(BARRIER_TEST_PATTERN, (resp_buf + 1)); 3616 mbox_buf = phba->MBslimaddr; 3617 writel(mbox, mbox_buf); 3618 3619 for (i = 0; i < 50; i++) { 3620 if (lpfc_readl((resp_buf + 1), &resp_data)) 3621 return; 3622 if (resp_data != ~(BARRIER_TEST_PATTERN)) 3623 mdelay(1); 3624 else 3625 break; 3626 } 3627 resp_data = 0; 3628 if (lpfc_readl((resp_buf + 1), &resp_data)) 3629 return; 3630 if (resp_data != ~(BARRIER_TEST_PATTERN)) { 3631 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE || 3632 phba->pport->stopped) 3633 goto restore_hc; 3634 else 3635 goto clear_errat; 3636 } 3637 3638 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST; 3639 resp_data = 0; 3640 for (i = 0; i < 500; i++) { 3641 if (lpfc_readl(resp_buf, &resp_data)) 3642 return; 3643 if (resp_data != mbox) 3644 mdelay(1); 3645 else 3646 break; 3647 } 3648 3649 clear_errat: 3650 3651 while (++i < 500) { 3652 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 3653 return; 3654 if (!(ha_copy & HA_ERATT)) 3655 mdelay(1); 3656 else 3657 break; 3658 } 3659 3660 if (readl(phba->HAregaddr) & HA_ERATT) { 3661 writel(HA_ERATT, phba->HAregaddr); 3662 phba->pport->stopped = 1; 3663 } 3664 3665 restore_hc: 3666 phba->link_flag &= ~LS_IGNORE_ERATT; 3667 writel(hc_copy, phba->HCregaddr); 3668 readl(phba->HCregaddr); /* flush */ 3669 } 3670 3671 /** 3672 * lpfc_sli_brdkill - Issue a kill_board mailbox command 3673 * @phba: Pointer to HBA context object. 3674 * 3675 * This function issues a kill_board mailbox command and waits for 3676 * the error attention interrupt. This function is called for stopping 3677 * the firmware processing. The caller is not required to hold any 3678 * locks. This function calls lpfc_hba_down_post function to free 3679 * any pending commands after the kill. The function will return 1 when it 3680 * fails to kill the board else will return 0. 3681 **/ 3682 int 3683 lpfc_sli_brdkill(struct lpfc_hba *phba) 3684 { 3685 struct lpfc_sli *psli; 3686 LPFC_MBOXQ_t *pmb; 3687 uint32_t status; 3688 uint32_t ha_copy; 3689 int retval; 3690 int i = 0; 3691 3692 psli = &phba->sli; 3693 3694 /* Kill HBA */ 3695 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3696 "0329 Kill HBA Data: x%x x%x\n", 3697 phba->pport->port_state, psli->sli_flag); 3698 3699 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3700 if (!pmb) 3701 return 1; 3702 3703 /* Disable the error attention */ 3704 spin_lock_irq(&phba->hbalock); 3705 if (lpfc_readl(phba->HCregaddr, &status)) { 3706 spin_unlock_irq(&phba->hbalock); 3707 mempool_free(pmb, phba->mbox_mem_pool); 3708 return 1; 3709 } 3710 status &= ~HC_ERINT_ENA; 3711 writel(status, phba->HCregaddr); 3712 readl(phba->HCregaddr); /* flush */ 3713 phba->link_flag |= LS_IGNORE_ERATT; 3714 spin_unlock_irq(&phba->hbalock); 3715 3716 lpfc_kill_board(phba, pmb); 3717 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 3718 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 3719 3720 if (retval != MBX_SUCCESS) { 3721 if (retval != MBX_BUSY) 3722 mempool_free(pmb, phba->mbox_mem_pool); 3723 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3724 "2752 KILL_BOARD command failed retval %d\n", 3725 retval); 3726 spin_lock_irq(&phba->hbalock); 3727 phba->link_flag &= ~LS_IGNORE_ERATT; 3728 spin_unlock_irq(&phba->hbalock); 3729 return 1; 3730 } 3731 3732 spin_lock_irq(&phba->hbalock); 3733 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 3734 spin_unlock_irq(&phba->hbalock); 3735 3736 mempool_free(pmb, phba->mbox_mem_pool); 3737 3738 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error 3739 * attention every 100ms for 3 seconds. If we don't get ERATT after 3740 * 3 seconds we still set HBA_ERROR state because the status of the 3741 * board is now undefined. 3742 */ 3743 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 3744 return 1; 3745 while ((i++ < 30) && !(ha_copy & HA_ERATT)) { 3746 mdelay(100); 3747 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 3748 return 1; 3749 } 3750 3751 del_timer_sync(&psli->mbox_tmo); 3752 if (ha_copy & HA_ERATT) { 3753 writel(HA_ERATT, phba->HAregaddr); 3754 phba->pport->stopped = 1; 3755 } 3756 spin_lock_irq(&phba->hbalock); 3757 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 3758 psli->mbox_active = NULL; 3759 phba->link_flag &= ~LS_IGNORE_ERATT; 3760 spin_unlock_irq(&phba->hbalock); 3761 3762 lpfc_hba_down_post(phba); 3763 phba->link_state = LPFC_HBA_ERROR; 3764 3765 return ha_copy & HA_ERATT ? 0 : 1; 3766 } 3767 3768 /** 3769 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA 3770 * @phba: Pointer to HBA context object. 3771 * 3772 * This function resets the HBA by writing HC_INITFF to the control 3773 * register. After the HBA resets, this function resets all the iocb ring 3774 * indices. This function disables PCI layer parity checking during 3775 * the reset. 3776 * This function returns 0 always. 3777 * The caller is not required to hold any locks. 3778 **/ 3779 int 3780 lpfc_sli_brdreset(struct lpfc_hba *phba) 3781 { 3782 struct lpfc_sli *psli; 3783 struct lpfc_sli_ring *pring; 3784 uint16_t cfg_value; 3785 int i; 3786 3787 psli = &phba->sli; 3788 3789 /* Reset HBA */ 3790 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3791 "0325 Reset HBA Data: x%x x%x\n", 3792 phba->pport->port_state, psli->sli_flag); 3793 3794 /* perform board reset */ 3795 phba->fc_eventTag = 0; 3796 phba->link_events = 0; 3797 phba->pport->fc_myDID = 0; 3798 phba->pport->fc_prevDID = 0; 3799 3800 /* Turn off parity checking and serr during the physical reset */ 3801 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); 3802 pci_write_config_word(phba->pcidev, PCI_COMMAND, 3803 (cfg_value & 3804 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); 3805 3806 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA); 3807 3808 /* Now toggle INITFF bit in the Host Control Register */ 3809 writel(HC_INITFF, phba->HCregaddr); 3810 mdelay(1); 3811 readl(phba->HCregaddr); /* flush */ 3812 writel(0, phba->HCregaddr); 3813 readl(phba->HCregaddr); /* flush */ 3814 3815 /* Restore PCI cmd register */ 3816 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value); 3817 3818 /* Initialize relevant SLI info */ 3819 for (i = 0; i < psli->num_rings; i++) { 3820 pring = &psli->ring[i]; 3821 pring->flag = 0; 3822 pring->rspidx = 0; 3823 pring->next_cmdidx = 0; 3824 pring->local_getidx = 0; 3825 pring->cmdidx = 0; 3826 pring->missbufcnt = 0; 3827 } 3828 3829 phba->link_state = LPFC_WARM_START; 3830 return 0; 3831 } 3832 3833 /** 3834 * lpfc_sli4_brdreset - Reset a sli-4 HBA 3835 * @phba: Pointer to HBA context object. 3836 * 3837 * This function resets a SLI4 HBA. This function disables PCI layer parity 3838 * checking during resets the device. The caller is not required to hold 3839 * any locks. 3840 * 3841 * This function returns 0 always. 3842 **/ 3843 int 3844 lpfc_sli4_brdreset(struct lpfc_hba *phba) 3845 { 3846 struct lpfc_sli *psli = &phba->sli; 3847 uint16_t cfg_value; 3848 uint8_t qindx; 3849 3850 /* Reset HBA */ 3851 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3852 "0295 Reset HBA Data: x%x x%x\n", 3853 phba->pport->port_state, psli->sli_flag); 3854 3855 /* perform board reset */ 3856 phba->fc_eventTag = 0; 3857 phba->link_events = 0; 3858 phba->pport->fc_myDID = 0; 3859 phba->pport->fc_prevDID = 0; 3860 3861 spin_lock_irq(&phba->hbalock); 3862 psli->sli_flag &= ~(LPFC_PROCESS_LA); 3863 phba->fcf.fcf_flag = 0; 3864 /* Clean up the child queue list for the CQs */ 3865 list_del_init(&phba->sli4_hba.mbx_wq->list); 3866 list_del_init(&phba->sli4_hba.els_wq->list); 3867 list_del_init(&phba->sli4_hba.hdr_rq->list); 3868 list_del_init(&phba->sli4_hba.dat_rq->list); 3869 list_del_init(&phba->sli4_hba.mbx_cq->list); 3870 list_del_init(&phba->sli4_hba.els_cq->list); 3871 for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++) 3872 list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list); 3873 qindx = 0; 3874 do 3875 list_del_init(&phba->sli4_hba.fcp_cq[qindx]->list); 3876 while (++qindx < phba->cfg_fcp_eq_count); 3877 spin_unlock_irq(&phba->hbalock); 3878 3879 /* Now physically reset the device */ 3880 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3881 "0389 Performing PCI function reset!\n"); 3882 3883 /* Turn off parity checking and serr during the physical reset */ 3884 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); 3885 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value & 3886 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); 3887 3888 /* Perform FCoE PCI function reset */ 3889 lpfc_pci_function_reset(phba); 3890 3891 /* Restore PCI cmd register */ 3892 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value); 3893 3894 return 0; 3895 } 3896 3897 /** 3898 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba 3899 * @phba: Pointer to HBA context object. 3900 * 3901 * This function is called in the SLI initialization code path to 3902 * restart the HBA. The caller is not required to hold any lock. 3903 * This function writes MBX_RESTART mailbox command to the SLIM and 3904 * resets the HBA. At the end of the function, it calls lpfc_hba_down_post 3905 * function to free any pending commands. The function enables 3906 * POST only during the first initialization. The function returns zero. 3907 * The function does not guarantee completion of MBX_RESTART mailbox 3908 * command before the return of this function. 3909 **/ 3910 static int 3911 lpfc_sli_brdrestart_s3(struct lpfc_hba *phba) 3912 { 3913 MAILBOX_t *mb; 3914 struct lpfc_sli *psli; 3915 volatile uint32_t word0; 3916 void __iomem *to_slim; 3917 uint32_t hba_aer_enabled; 3918 3919 spin_lock_irq(&phba->hbalock); 3920 3921 /* Take PCIe device Advanced Error Reporting (AER) state */ 3922 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED; 3923 3924 psli = &phba->sli; 3925 3926 /* Restart HBA */ 3927 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3928 "0337 Restart HBA Data: x%x x%x\n", 3929 phba->pport->port_state, psli->sli_flag); 3930 3931 word0 = 0; 3932 mb = (MAILBOX_t *) &word0; 3933 mb->mbxCommand = MBX_RESTART; 3934 mb->mbxHc = 1; 3935 3936 lpfc_reset_barrier(phba); 3937 3938 to_slim = phba->MBslimaddr; 3939 writel(*(uint32_t *) mb, to_slim); 3940 readl(to_slim); /* flush */ 3941 3942 /* Only skip post after fc_ffinit is completed */ 3943 if (phba->pport->port_state) 3944 word0 = 1; /* This is really setting up word1 */ 3945 else 3946 word0 = 0; /* This is really setting up word1 */ 3947 to_slim = phba->MBslimaddr + sizeof (uint32_t); 3948 writel(*(uint32_t *) mb, to_slim); 3949 readl(to_slim); /* flush */ 3950 3951 lpfc_sli_brdreset(phba); 3952 phba->pport->stopped = 0; 3953 phba->link_state = LPFC_INIT_START; 3954 phba->hba_flag = 0; 3955 spin_unlock_irq(&phba->hbalock); 3956 3957 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); 3958 psli->stats_start = get_seconds(); 3959 3960 /* Give the INITFF and Post time to settle. */ 3961 mdelay(100); 3962 3963 /* Reset HBA AER if it was enabled, note hba_flag was reset above */ 3964 if (hba_aer_enabled) 3965 pci_disable_pcie_error_reporting(phba->pcidev); 3966 3967 lpfc_hba_down_post(phba); 3968 3969 return 0; 3970 } 3971 3972 /** 3973 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba 3974 * @phba: Pointer to HBA context object. 3975 * 3976 * This function is called in the SLI initialization code path to restart 3977 * a SLI4 HBA. The caller is not required to hold any lock. 3978 * At the end of the function, it calls lpfc_hba_down_post function to 3979 * free any pending commands. 3980 **/ 3981 static int 3982 lpfc_sli_brdrestart_s4(struct lpfc_hba *phba) 3983 { 3984 struct lpfc_sli *psli = &phba->sli; 3985 uint32_t hba_aer_enabled; 3986 3987 /* Restart HBA */ 3988 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3989 "0296 Restart HBA Data: x%x x%x\n", 3990 phba->pport->port_state, psli->sli_flag); 3991 3992 /* Take PCIe device Advanced Error Reporting (AER) state */ 3993 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED; 3994 3995 lpfc_sli4_brdreset(phba); 3996 3997 spin_lock_irq(&phba->hbalock); 3998 phba->pport->stopped = 0; 3999 phba->link_state = LPFC_INIT_START; 4000 phba->hba_flag = 0; 4001 spin_unlock_irq(&phba->hbalock); 4002 4003 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); 4004 psli->stats_start = get_seconds(); 4005 4006 /* Reset HBA AER if it was enabled, note hba_flag was reset above */ 4007 if (hba_aer_enabled) 4008 pci_disable_pcie_error_reporting(phba->pcidev); 4009 4010 lpfc_hba_down_post(phba); 4011 4012 return 0; 4013 } 4014 4015 /** 4016 * lpfc_sli_brdrestart - Wrapper func for restarting hba 4017 * @phba: Pointer to HBA context object. 4018 * 4019 * This routine wraps the actual SLI3 or SLI4 hba restart routine from the 4020 * API jump table function pointer from the lpfc_hba struct. 4021 **/ 4022 int 4023 lpfc_sli_brdrestart(struct lpfc_hba *phba) 4024 { 4025 return phba->lpfc_sli_brdrestart(phba); 4026 } 4027 4028 /** 4029 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart 4030 * @phba: Pointer to HBA context object. 4031 * 4032 * This function is called after a HBA restart to wait for successful 4033 * restart of the HBA. Successful restart of the HBA is indicated by 4034 * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15 4035 * iteration, the function will restart the HBA again. The function returns 4036 * zero if HBA successfully restarted else returns negative error code. 4037 **/ 4038 static int 4039 lpfc_sli_chipset_init(struct lpfc_hba *phba) 4040 { 4041 uint32_t status, i = 0; 4042 4043 /* Read the HBA Host Status Register */ 4044 if (lpfc_readl(phba->HSregaddr, &status)) 4045 return -EIO; 4046 4047 /* Check status register to see what current state is */ 4048 i = 0; 4049 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) { 4050 4051 /* Check every 10ms for 10 retries, then every 100ms for 90 4052 * retries, then every 1 sec for 50 retires for a total of 4053 * ~60 seconds before reset the board again and check every 4054 * 1 sec for 50 retries. The up to 60 seconds before the 4055 * board ready is required by the Falcon FIPS zeroization 4056 * complete, and any reset the board in between shall cause 4057 * restart of zeroization, further delay the board ready. 4058 */ 4059 if (i++ >= 200) { 4060 /* Adapter failed to init, timeout, status reg 4061 <status> */ 4062 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4063 "0436 Adapter failed to init, " 4064 "timeout, status reg x%x, " 4065 "FW Data: A8 x%x AC x%x\n", status, 4066 readl(phba->MBslimaddr + 0xa8), 4067 readl(phba->MBslimaddr + 0xac)); 4068 phba->link_state = LPFC_HBA_ERROR; 4069 return -ETIMEDOUT; 4070 } 4071 4072 /* Check to see if any errors occurred during init */ 4073 if (status & HS_FFERM) { 4074 /* ERROR: During chipset initialization */ 4075 /* Adapter failed to init, chipset, status reg 4076 <status> */ 4077 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4078 "0437 Adapter failed to init, " 4079 "chipset, status reg x%x, " 4080 "FW Data: A8 x%x AC x%x\n", status, 4081 readl(phba->MBslimaddr + 0xa8), 4082 readl(phba->MBslimaddr + 0xac)); 4083 phba->link_state = LPFC_HBA_ERROR; 4084 return -EIO; 4085 } 4086 4087 if (i <= 10) 4088 msleep(10); 4089 else if (i <= 100) 4090 msleep(100); 4091 else 4092 msleep(1000); 4093 4094 if (i == 150) { 4095 /* Do post */ 4096 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 4097 lpfc_sli_brdrestart(phba); 4098 } 4099 /* Read the HBA Host Status Register */ 4100 if (lpfc_readl(phba->HSregaddr, &status)) 4101 return -EIO; 4102 } 4103 4104 /* Check to see if any errors occurred during init */ 4105 if (status & HS_FFERM) { 4106 /* ERROR: During chipset initialization */ 4107 /* Adapter failed to init, chipset, status reg <status> */ 4108 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4109 "0438 Adapter failed to init, chipset, " 4110 "status reg x%x, " 4111 "FW Data: A8 x%x AC x%x\n", status, 4112 readl(phba->MBslimaddr + 0xa8), 4113 readl(phba->MBslimaddr + 0xac)); 4114 phba->link_state = LPFC_HBA_ERROR; 4115 return -EIO; 4116 } 4117 4118 /* Clear all interrupt enable conditions */ 4119 writel(0, phba->HCregaddr); 4120 readl(phba->HCregaddr); /* flush */ 4121 4122 /* setup host attn register */ 4123 writel(0xffffffff, phba->HAregaddr); 4124 readl(phba->HAregaddr); /* flush */ 4125 return 0; 4126 } 4127 4128 /** 4129 * lpfc_sli_hbq_count - Get the number of HBQs to be configured 4130 * 4131 * This function calculates and returns the number of HBQs required to be 4132 * configured. 4133 **/ 4134 int 4135 lpfc_sli_hbq_count(void) 4136 { 4137 return ARRAY_SIZE(lpfc_hbq_defs); 4138 } 4139 4140 /** 4141 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries 4142 * 4143 * This function adds the number of hbq entries in every HBQ to get 4144 * the total number of hbq entries required for the HBA and returns 4145 * the total count. 4146 **/ 4147 static int 4148 lpfc_sli_hbq_entry_count(void) 4149 { 4150 int hbq_count = lpfc_sli_hbq_count(); 4151 int count = 0; 4152 int i; 4153 4154 for (i = 0; i < hbq_count; ++i) 4155 count += lpfc_hbq_defs[i]->entry_count; 4156 return count; 4157 } 4158 4159 /** 4160 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries 4161 * 4162 * This function calculates amount of memory required for all hbq entries 4163 * to be configured and returns the total memory required. 4164 **/ 4165 int 4166 lpfc_sli_hbq_size(void) 4167 { 4168 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry); 4169 } 4170 4171 /** 4172 * lpfc_sli_hbq_setup - configure and initialize HBQs 4173 * @phba: Pointer to HBA context object. 4174 * 4175 * This function is called during the SLI initialization to configure 4176 * all the HBQs and post buffers to the HBQ. The caller is not 4177 * required to hold any locks. This function will return zero if successful 4178 * else it will return negative error code. 4179 **/ 4180 static int 4181 lpfc_sli_hbq_setup(struct lpfc_hba *phba) 4182 { 4183 int hbq_count = lpfc_sli_hbq_count(); 4184 LPFC_MBOXQ_t *pmb; 4185 MAILBOX_t *pmbox; 4186 uint32_t hbqno; 4187 uint32_t hbq_entry_index; 4188 4189 /* Get a Mailbox buffer to setup mailbox 4190 * commands for HBA initialization 4191 */ 4192 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4193 4194 if (!pmb) 4195 return -ENOMEM; 4196 4197 pmbox = &pmb->u.mb; 4198 4199 /* Initialize the struct lpfc_sli_hbq structure for each hbq */ 4200 phba->link_state = LPFC_INIT_MBX_CMDS; 4201 phba->hbq_in_use = 1; 4202 4203 hbq_entry_index = 0; 4204 for (hbqno = 0; hbqno < hbq_count; ++hbqno) { 4205 phba->hbqs[hbqno].next_hbqPutIdx = 0; 4206 phba->hbqs[hbqno].hbqPutIdx = 0; 4207 phba->hbqs[hbqno].local_hbqGetIdx = 0; 4208 phba->hbqs[hbqno].entry_count = 4209 lpfc_hbq_defs[hbqno]->entry_count; 4210 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno], 4211 hbq_entry_index, pmb); 4212 hbq_entry_index += phba->hbqs[hbqno].entry_count; 4213 4214 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 4215 /* Adapter failed to init, mbxCmd <cmd> CFG_RING, 4216 mbxStatus <status>, ring <num> */ 4217 4218 lpfc_printf_log(phba, KERN_ERR, 4219 LOG_SLI | LOG_VPORT, 4220 "1805 Adapter failed to init. " 4221 "Data: x%x x%x x%x\n", 4222 pmbox->mbxCommand, 4223 pmbox->mbxStatus, hbqno); 4224 4225 phba->link_state = LPFC_HBA_ERROR; 4226 mempool_free(pmb, phba->mbox_mem_pool); 4227 return -ENXIO; 4228 } 4229 } 4230 phba->hbq_count = hbq_count; 4231 4232 mempool_free(pmb, phba->mbox_mem_pool); 4233 4234 /* Initially populate or replenish the HBQs */ 4235 for (hbqno = 0; hbqno < hbq_count; ++hbqno) 4236 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno); 4237 return 0; 4238 } 4239 4240 /** 4241 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA 4242 * @phba: Pointer to HBA context object. 4243 * 4244 * This function is called during the SLI initialization to configure 4245 * all the HBQs and post buffers to the HBQ. The caller is not 4246 * required to hold any locks. This function will return zero if successful 4247 * else it will return negative error code. 4248 **/ 4249 static int 4250 lpfc_sli4_rb_setup(struct lpfc_hba *phba) 4251 { 4252 phba->hbq_in_use = 1; 4253 phba->hbqs[0].entry_count = lpfc_hbq_defs[0]->entry_count; 4254 phba->hbq_count = 1; 4255 /* Initially populate or replenish the HBQs */ 4256 lpfc_sli_hbqbuf_init_hbqs(phba, 0); 4257 return 0; 4258 } 4259 4260 /** 4261 * lpfc_sli_config_port - Issue config port mailbox command 4262 * @phba: Pointer to HBA context object. 4263 * @sli_mode: sli mode - 2/3 4264 * 4265 * This function is called by the sli intialization code path 4266 * to issue config_port mailbox command. This function restarts the 4267 * HBA firmware and issues a config_port mailbox command to configure 4268 * the SLI interface in the sli mode specified by sli_mode 4269 * variable. The caller is not required to hold any locks. 4270 * The function returns 0 if successful, else returns negative error 4271 * code. 4272 **/ 4273 int 4274 lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode) 4275 { 4276 LPFC_MBOXQ_t *pmb; 4277 uint32_t resetcount = 0, rc = 0, done = 0; 4278 4279 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4280 if (!pmb) { 4281 phba->link_state = LPFC_HBA_ERROR; 4282 return -ENOMEM; 4283 } 4284 4285 phba->sli_rev = sli_mode; 4286 while (resetcount < 2 && !done) { 4287 spin_lock_irq(&phba->hbalock); 4288 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE; 4289 spin_unlock_irq(&phba->hbalock); 4290 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 4291 lpfc_sli_brdrestart(phba); 4292 rc = lpfc_sli_chipset_init(phba); 4293 if (rc) 4294 break; 4295 4296 spin_lock_irq(&phba->hbalock); 4297 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 4298 spin_unlock_irq(&phba->hbalock); 4299 resetcount++; 4300 4301 /* Call pre CONFIG_PORT mailbox command initialization. A 4302 * value of 0 means the call was successful. Any other 4303 * nonzero value is a failure, but if ERESTART is returned, 4304 * the driver may reset the HBA and try again. 4305 */ 4306 rc = lpfc_config_port_prep(phba); 4307 if (rc == -ERESTART) { 4308 phba->link_state = LPFC_LINK_UNKNOWN; 4309 continue; 4310 } else if (rc) 4311 break; 4312 4313 phba->link_state = LPFC_INIT_MBX_CMDS; 4314 lpfc_config_port(phba, pmb); 4315 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 4316 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED | 4317 LPFC_SLI3_HBQ_ENABLED | 4318 LPFC_SLI3_CRP_ENABLED | 4319 LPFC_SLI3_BG_ENABLED | 4320 LPFC_SLI3_DSS_ENABLED); 4321 if (rc != MBX_SUCCESS) { 4322 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4323 "0442 Adapter failed to init, mbxCmd x%x " 4324 "CONFIG_PORT, mbxStatus x%x Data: x%x\n", 4325 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0); 4326 spin_lock_irq(&phba->hbalock); 4327 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE; 4328 spin_unlock_irq(&phba->hbalock); 4329 rc = -ENXIO; 4330 } else { 4331 /* Allow asynchronous mailbox command to go through */ 4332 spin_lock_irq(&phba->hbalock); 4333 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 4334 spin_unlock_irq(&phba->hbalock); 4335 done = 1; 4336 } 4337 } 4338 if (!done) { 4339 rc = -EINVAL; 4340 goto do_prep_failed; 4341 } 4342 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) { 4343 if (!pmb->u.mb.un.varCfgPort.cMA) { 4344 rc = -ENXIO; 4345 goto do_prep_failed; 4346 } 4347 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) { 4348 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED; 4349 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi; 4350 phba->max_vports = (phba->max_vpi > phba->max_vports) ? 4351 phba->max_vpi : phba->max_vports; 4352 4353 } else 4354 phba->max_vpi = 0; 4355 phba->fips_level = 0; 4356 phba->fips_spec_rev = 0; 4357 if (pmb->u.mb.un.varCfgPort.gdss) { 4358 phba->sli3_options |= LPFC_SLI3_DSS_ENABLED; 4359 phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level; 4360 phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev; 4361 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4362 "2850 Security Crypto Active. FIPS x%d " 4363 "(Spec Rev: x%d)", 4364 phba->fips_level, phba->fips_spec_rev); 4365 } 4366 if (pmb->u.mb.un.varCfgPort.sec_err) { 4367 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4368 "2856 Config Port Security Crypto " 4369 "Error: x%x ", 4370 pmb->u.mb.un.varCfgPort.sec_err); 4371 } 4372 if (pmb->u.mb.un.varCfgPort.gerbm) 4373 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED; 4374 if (pmb->u.mb.un.varCfgPort.gcrp) 4375 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED; 4376 4377 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get; 4378 phba->port_gp = phba->mbox->us.s3_pgp.port; 4379 4380 if (phba->cfg_enable_bg) { 4381 if (pmb->u.mb.un.varCfgPort.gbg) 4382 phba->sli3_options |= LPFC_SLI3_BG_ENABLED; 4383 else 4384 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4385 "0443 Adapter did not grant " 4386 "BlockGuard\n"); 4387 } 4388 } else { 4389 phba->hbq_get = NULL; 4390 phba->port_gp = phba->mbox->us.s2.port; 4391 phba->max_vpi = 0; 4392 } 4393 do_prep_failed: 4394 mempool_free(pmb, phba->mbox_mem_pool); 4395 return rc; 4396 } 4397 4398 4399 /** 4400 * lpfc_sli_hba_setup - SLI intialization function 4401 * @phba: Pointer to HBA context object. 4402 * 4403 * This function is the main SLI intialization function. This function 4404 * is called by the HBA intialization code, HBA reset code and HBA 4405 * error attention handler code. Caller is not required to hold any 4406 * locks. This function issues config_port mailbox command to configure 4407 * the SLI, setup iocb rings and HBQ rings. In the end the function 4408 * calls the config_port_post function to issue init_link mailbox 4409 * command and to start the discovery. The function will return zero 4410 * if successful, else it will return negative error code. 4411 **/ 4412 int 4413 lpfc_sli_hba_setup(struct lpfc_hba *phba) 4414 { 4415 uint32_t rc; 4416 int mode = 3, i; 4417 int longs; 4418 4419 switch (lpfc_sli_mode) { 4420 case 2: 4421 if (phba->cfg_enable_npiv) { 4422 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 4423 "1824 NPIV enabled: Override lpfc_sli_mode " 4424 "parameter (%d) to auto (0).\n", 4425 lpfc_sli_mode); 4426 break; 4427 } 4428 mode = 2; 4429 break; 4430 case 0: 4431 case 3: 4432 break; 4433 default: 4434 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 4435 "1819 Unrecognized lpfc_sli_mode " 4436 "parameter: %d.\n", lpfc_sli_mode); 4437 4438 break; 4439 } 4440 4441 rc = lpfc_sli_config_port(phba, mode); 4442 4443 if (rc && lpfc_sli_mode == 3) 4444 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 4445 "1820 Unable to select SLI-3. " 4446 "Not supported by adapter.\n"); 4447 if (rc && mode != 2) 4448 rc = lpfc_sli_config_port(phba, 2); 4449 if (rc) 4450 goto lpfc_sli_hba_setup_error; 4451 4452 /* Enable PCIe device Advanced Error Reporting (AER) if configured */ 4453 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) { 4454 rc = pci_enable_pcie_error_reporting(phba->pcidev); 4455 if (!rc) { 4456 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4457 "2709 This device supports " 4458 "Advanced Error Reporting (AER)\n"); 4459 spin_lock_irq(&phba->hbalock); 4460 phba->hba_flag |= HBA_AER_ENABLED; 4461 spin_unlock_irq(&phba->hbalock); 4462 } else { 4463 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4464 "2708 This device does not support " 4465 "Advanced Error Reporting (AER)\n"); 4466 phba->cfg_aer_support = 0; 4467 } 4468 } 4469 4470 if (phba->sli_rev == 3) { 4471 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE; 4472 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE; 4473 } else { 4474 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE; 4475 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE; 4476 phba->sli3_options = 0; 4477 } 4478 4479 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4480 "0444 Firmware in SLI %x mode. Max_vpi %d\n", 4481 phba->sli_rev, phba->max_vpi); 4482 rc = lpfc_sli_ring_map(phba); 4483 4484 if (rc) 4485 goto lpfc_sli_hba_setup_error; 4486 4487 /* Initialize VPIs. */ 4488 if (phba->sli_rev == LPFC_SLI_REV3) { 4489 /* 4490 * The VPI bitmask and physical ID array are allocated 4491 * and initialized once only - at driver load. A port 4492 * reset doesn't need to reinitialize this memory. 4493 */ 4494 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) { 4495 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG; 4496 phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long), 4497 GFP_KERNEL); 4498 if (!phba->vpi_bmask) { 4499 rc = -ENOMEM; 4500 goto lpfc_sli_hba_setup_error; 4501 } 4502 4503 phba->vpi_ids = kzalloc( 4504 (phba->max_vpi+1) * sizeof(uint16_t), 4505 GFP_KERNEL); 4506 if (!phba->vpi_ids) { 4507 kfree(phba->vpi_bmask); 4508 rc = -ENOMEM; 4509 goto lpfc_sli_hba_setup_error; 4510 } 4511 for (i = 0; i < phba->max_vpi; i++) 4512 phba->vpi_ids[i] = i; 4513 } 4514 } 4515 4516 /* Init HBQs */ 4517 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 4518 rc = lpfc_sli_hbq_setup(phba); 4519 if (rc) 4520 goto lpfc_sli_hba_setup_error; 4521 } 4522 spin_lock_irq(&phba->hbalock); 4523 phba->sli.sli_flag |= LPFC_PROCESS_LA; 4524 spin_unlock_irq(&phba->hbalock); 4525 4526 rc = lpfc_config_port_post(phba); 4527 if (rc) 4528 goto lpfc_sli_hba_setup_error; 4529 4530 return rc; 4531 4532 lpfc_sli_hba_setup_error: 4533 phba->link_state = LPFC_HBA_ERROR; 4534 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4535 "0445 Firmware initialization failed\n"); 4536 return rc; 4537 } 4538 4539 /** 4540 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region 4541 * @phba: Pointer to HBA context object. 4542 * @mboxq: mailbox pointer. 4543 * This function issue a dump mailbox command to read config region 4544 * 23 and parse the records in the region and populate driver 4545 * data structure. 4546 **/ 4547 static int 4548 lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba, 4549 LPFC_MBOXQ_t *mboxq) 4550 { 4551 struct lpfc_dmabuf *mp; 4552 struct lpfc_mqe *mqe; 4553 uint32_t data_length; 4554 int rc; 4555 4556 /* Program the default value of vlan_id and fc_map */ 4557 phba->valid_vlan = 0; 4558 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; 4559 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; 4560 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; 4561 4562 mqe = &mboxq->u.mqe; 4563 if (lpfc_dump_fcoe_param(phba, mboxq)) 4564 return -ENOMEM; 4565 4566 mp = (struct lpfc_dmabuf *) mboxq->context1; 4567 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4568 4569 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 4570 "(%d):2571 Mailbox cmd x%x Status x%x " 4571 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x " 4572 "x%x x%x x%x x%x x%x x%x x%x x%x x%x " 4573 "CQ: x%x x%x x%x x%x\n", 4574 mboxq->vport ? mboxq->vport->vpi : 0, 4575 bf_get(lpfc_mqe_command, mqe), 4576 bf_get(lpfc_mqe_status, mqe), 4577 mqe->un.mb_words[0], mqe->un.mb_words[1], 4578 mqe->un.mb_words[2], mqe->un.mb_words[3], 4579 mqe->un.mb_words[4], mqe->un.mb_words[5], 4580 mqe->un.mb_words[6], mqe->un.mb_words[7], 4581 mqe->un.mb_words[8], mqe->un.mb_words[9], 4582 mqe->un.mb_words[10], mqe->un.mb_words[11], 4583 mqe->un.mb_words[12], mqe->un.mb_words[13], 4584 mqe->un.mb_words[14], mqe->un.mb_words[15], 4585 mqe->un.mb_words[16], mqe->un.mb_words[50], 4586 mboxq->mcqe.word0, 4587 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, 4588 mboxq->mcqe.trailer); 4589 4590 if (rc) { 4591 lpfc_mbuf_free(phba, mp->virt, mp->phys); 4592 kfree(mp); 4593 return -EIO; 4594 } 4595 data_length = mqe->un.mb_words[5]; 4596 if (data_length > DMP_RGN23_SIZE) { 4597 lpfc_mbuf_free(phba, mp->virt, mp->phys); 4598 kfree(mp); 4599 return -EIO; 4600 } 4601 4602 lpfc_parse_fcoe_conf(phba, mp->virt, data_length); 4603 lpfc_mbuf_free(phba, mp->virt, mp->phys); 4604 kfree(mp); 4605 return 0; 4606 } 4607 4608 /** 4609 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data 4610 * @phba: pointer to lpfc hba data structure. 4611 * @mboxq: pointer to the LPFC_MBOXQ_t structure. 4612 * @vpd: pointer to the memory to hold resulting port vpd data. 4613 * @vpd_size: On input, the number of bytes allocated to @vpd. 4614 * On output, the number of data bytes in @vpd. 4615 * 4616 * This routine executes a READ_REV SLI4 mailbox command. In 4617 * addition, this routine gets the port vpd data. 4618 * 4619 * Return codes 4620 * 0 - successful 4621 * -ENOMEM - could not allocated memory. 4622 **/ 4623 static int 4624 lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, 4625 uint8_t *vpd, uint32_t *vpd_size) 4626 { 4627 int rc = 0; 4628 uint32_t dma_size; 4629 struct lpfc_dmabuf *dmabuf; 4630 struct lpfc_mqe *mqe; 4631 4632 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 4633 if (!dmabuf) 4634 return -ENOMEM; 4635 4636 /* 4637 * Get a DMA buffer for the vpd data resulting from the READ_REV 4638 * mailbox command. 4639 */ 4640 dma_size = *vpd_size; 4641 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 4642 dma_size, 4643 &dmabuf->phys, 4644 GFP_KERNEL); 4645 if (!dmabuf->virt) { 4646 kfree(dmabuf); 4647 return -ENOMEM; 4648 } 4649 memset(dmabuf->virt, 0, dma_size); 4650 4651 /* 4652 * The SLI4 implementation of READ_REV conflicts at word1, 4653 * bits 31:16 and SLI4 adds vpd functionality not present 4654 * in SLI3. This code corrects the conflicts. 4655 */ 4656 lpfc_read_rev(phba, mboxq); 4657 mqe = &mboxq->u.mqe; 4658 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys); 4659 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys); 4660 mqe->un.read_rev.word1 &= 0x0000FFFF; 4661 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1); 4662 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size); 4663 4664 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4665 if (rc) { 4666 dma_free_coherent(&phba->pcidev->dev, dma_size, 4667 dmabuf->virt, dmabuf->phys); 4668 kfree(dmabuf); 4669 return -EIO; 4670 } 4671 4672 /* 4673 * The available vpd length cannot be bigger than the 4674 * DMA buffer passed to the port. Catch the less than 4675 * case and update the caller's size. 4676 */ 4677 if (mqe->un.read_rev.avail_vpd_len < *vpd_size) 4678 *vpd_size = mqe->un.read_rev.avail_vpd_len; 4679 4680 memcpy(vpd, dmabuf->virt, *vpd_size); 4681 4682 dma_free_coherent(&phba->pcidev->dev, dma_size, 4683 dmabuf->virt, dmabuf->phys); 4684 kfree(dmabuf); 4685 return 0; 4686 } 4687 4688 /** 4689 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues 4690 * @phba: pointer to lpfc hba data structure. 4691 * 4692 * This routine is called to explicitly arm the SLI4 device's completion and 4693 * event queues 4694 **/ 4695 static void 4696 lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba) 4697 { 4698 uint8_t fcp_eqidx; 4699 4700 lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM); 4701 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM); 4702 fcp_eqidx = 0; 4703 do 4704 lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx], 4705 LPFC_QUEUE_REARM); 4706 while (++fcp_eqidx < phba->cfg_fcp_eq_count); 4707 lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM); 4708 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) 4709 lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx], 4710 LPFC_QUEUE_REARM); 4711 } 4712 4713 /** 4714 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count. 4715 * @phba: Pointer to HBA context object. 4716 * @type: The resource extent type. 4717 * @extnt_count: buffer to hold port available extent count. 4718 * @extnt_size: buffer to hold element count per extent. 4719 * 4720 * This function calls the port and retrievs the number of available 4721 * extents and their size for a particular extent type. 4722 * 4723 * Returns: 0 if successful. Nonzero otherwise. 4724 **/ 4725 int 4726 lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type, 4727 uint16_t *extnt_count, uint16_t *extnt_size) 4728 { 4729 int rc = 0; 4730 uint32_t length; 4731 uint32_t mbox_tmo; 4732 struct lpfc_mbx_get_rsrc_extent_info *rsrc_info; 4733 LPFC_MBOXQ_t *mbox; 4734 4735 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4736 if (!mbox) 4737 return -ENOMEM; 4738 4739 /* Find out how many extents are available for this resource type */ 4740 length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) - 4741 sizeof(struct lpfc_sli4_cfg_mhdr)); 4742 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 4743 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO, 4744 length, LPFC_SLI4_MBX_EMBED); 4745 4746 /* Send an extents count of 0 - the GET doesn't use it. */ 4747 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type, 4748 LPFC_SLI4_MBX_EMBED); 4749 if (unlikely(rc)) { 4750 rc = -EIO; 4751 goto err_exit; 4752 } 4753 4754 if (!phba->sli4_hba.intr_enable) 4755 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 4756 else { 4757 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); 4758 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 4759 } 4760 if (unlikely(rc)) { 4761 rc = -EIO; 4762 goto err_exit; 4763 } 4764 4765 rsrc_info = &mbox->u.mqe.un.rsrc_extent_info; 4766 if (bf_get(lpfc_mbox_hdr_status, 4767 &rsrc_info->header.cfg_shdr.response)) { 4768 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 4769 "2930 Failed to get resource extents " 4770 "Status 0x%x Add'l Status 0x%x\n", 4771 bf_get(lpfc_mbox_hdr_status, 4772 &rsrc_info->header.cfg_shdr.response), 4773 bf_get(lpfc_mbox_hdr_add_status, 4774 &rsrc_info->header.cfg_shdr.response)); 4775 rc = -EIO; 4776 goto err_exit; 4777 } 4778 4779 *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt, 4780 &rsrc_info->u.rsp); 4781 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size, 4782 &rsrc_info->u.rsp); 4783 err_exit: 4784 mempool_free(mbox, phba->mbox_mem_pool); 4785 return rc; 4786 } 4787 4788 /** 4789 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents. 4790 * @phba: Pointer to HBA context object. 4791 * @type: The extent type to check. 4792 * 4793 * This function reads the current available extents from the port and checks 4794 * if the extent count or extent size has changed since the last access. 4795 * Callers use this routine post port reset to understand if there is a 4796 * extent reprovisioning requirement. 4797 * 4798 * Returns: 4799 * -Error: error indicates problem. 4800 * 1: Extent count or size has changed. 4801 * 0: No changes. 4802 **/ 4803 static int 4804 lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type) 4805 { 4806 uint16_t curr_ext_cnt, rsrc_ext_cnt; 4807 uint16_t size_diff, rsrc_ext_size; 4808 int rc = 0; 4809 struct lpfc_rsrc_blks *rsrc_entry; 4810 struct list_head *rsrc_blk_list = NULL; 4811 4812 size_diff = 0; 4813 curr_ext_cnt = 0; 4814 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type, 4815 &rsrc_ext_cnt, 4816 &rsrc_ext_size); 4817 if (unlikely(rc)) 4818 return -EIO; 4819 4820 switch (type) { 4821 case LPFC_RSC_TYPE_FCOE_RPI: 4822 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list; 4823 break; 4824 case LPFC_RSC_TYPE_FCOE_VPI: 4825 rsrc_blk_list = &phba->lpfc_vpi_blk_list; 4826 break; 4827 case LPFC_RSC_TYPE_FCOE_XRI: 4828 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list; 4829 break; 4830 case LPFC_RSC_TYPE_FCOE_VFI: 4831 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list; 4832 break; 4833 default: 4834 break; 4835 } 4836 4837 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) { 4838 curr_ext_cnt++; 4839 if (rsrc_entry->rsrc_size != rsrc_ext_size) 4840 size_diff++; 4841 } 4842 4843 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0) 4844 rc = 1; 4845 4846 return rc; 4847 } 4848 4849 /** 4850 * lpfc_sli4_cfg_post_extnts - 4851 * @phba: Pointer to HBA context object. 4852 * @extnt_cnt - number of available extents. 4853 * @type - the extent type (rpi, xri, vfi, vpi). 4854 * @emb - buffer to hold either MBX_EMBED or MBX_NEMBED operation. 4855 * @mbox - pointer to the caller's allocated mailbox structure. 4856 * 4857 * This function executes the extents allocation request. It also 4858 * takes care of the amount of memory needed to allocate or get the 4859 * allocated extents. It is the caller's responsibility to evaluate 4860 * the response. 4861 * 4862 * Returns: 4863 * -Error: Error value describes the condition found. 4864 * 0: if successful 4865 **/ 4866 static int 4867 lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t *extnt_cnt, 4868 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox) 4869 { 4870 int rc = 0; 4871 uint32_t req_len; 4872 uint32_t emb_len; 4873 uint32_t alloc_len, mbox_tmo; 4874 4875 /* Calculate the total requested length of the dma memory */ 4876 req_len = *extnt_cnt * sizeof(uint16_t); 4877 4878 /* 4879 * Calculate the size of an embedded mailbox. The uint32_t 4880 * accounts for extents-specific word. 4881 */ 4882 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) - 4883 sizeof(uint32_t); 4884 4885 /* 4886 * Presume the allocation and response will fit into an embedded 4887 * mailbox. If not true, reconfigure to a non-embedded mailbox. 4888 */ 4889 *emb = LPFC_SLI4_MBX_EMBED; 4890 if (req_len > emb_len) { 4891 req_len = *extnt_cnt * sizeof(uint16_t) + 4892 sizeof(union lpfc_sli4_cfg_shdr) + 4893 sizeof(uint32_t); 4894 *emb = LPFC_SLI4_MBX_NEMBED; 4895 } 4896 4897 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 4898 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT, 4899 req_len, *emb); 4900 if (alloc_len < req_len) { 4901 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4902 "2982 Allocated DMA memory size (x%x) is " 4903 "less than the requested DMA memory " 4904 "size (x%x)\n", alloc_len, req_len); 4905 return -ENOMEM; 4906 } 4907 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, *extnt_cnt, type, *emb); 4908 if (unlikely(rc)) 4909 return -EIO; 4910 4911 if (!phba->sli4_hba.intr_enable) 4912 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 4913 else { 4914 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); 4915 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 4916 } 4917 4918 if (unlikely(rc)) 4919 rc = -EIO; 4920 return rc; 4921 } 4922 4923 /** 4924 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent. 4925 * @phba: Pointer to HBA context object. 4926 * @type: The resource extent type to allocate. 4927 * 4928 * This function allocates the number of elements for the specified 4929 * resource type. 4930 **/ 4931 static int 4932 lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type) 4933 { 4934 bool emb = false; 4935 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size; 4936 uint16_t rsrc_id, rsrc_start, j, k; 4937 uint16_t *ids; 4938 int i, rc; 4939 unsigned long longs; 4940 unsigned long *bmask; 4941 struct lpfc_rsrc_blks *rsrc_blks; 4942 LPFC_MBOXQ_t *mbox; 4943 uint32_t length; 4944 struct lpfc_id_range *id_array = NULL; 4945 void *virtaddr = NULL; 4946 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc; 4947 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext; 4948 struct list_head *ext_blk_list; 4949 4950 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type, 4951 &rsrc_cnt, 4952 &rsrc_size); 4953 if (unlikely(rc)) 4954 return -EIO; 4955 4956 if ((rsrc_cnt == 0) || (rsrc_size == 0)) { 4957 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 4958 "3009 No available Resource Extents " 4959 "for resource type 0x%x: Count: 0x%x, " 4960 "Size 0x%x\n", type, rsrc_cnt, 4961 rsrc_size); 4962 return -ENOMEM; 4963 } 4964 4965 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT, 4966 "2903 Available Resource Extents " 4967 "for resource type 0x%x: Count: 0x%x, " 4968 "Size 0x%x\n", type, rsrc_cnt, 4969 rsrc_size); 4970 4971 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4972 if (!mbox) 4973 return -ENOMEM; 4974 4975 rc = lpfc_sli4_cfg_post_extnts(phba, &rsrc_cnt, type, &emb, mbox); 4976 if (unlikely(rc)) { 4977 rc = -EIO; 4978 goto err_exit; 4979 } 4980 4981 /* 4982 * Figure out where the response is located. Then get local pointers 4983 * to the response data. The port does not guarantee to respond to 4984 * all extents counts request so update the local variable with the 4985 * allocated count from the port. 4986 */ 4987 if (emb == LPFC_SLI4_MBX_EMBED) { 4988 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents; 4989 id_array = &rsrc_ext->u.rsp.id[0]; 4990 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp); 4991 } else { 4992 virtaddr = mbox->sge_array->addr[0]; 4993 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr; 4994 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc); 4995 id_array = &n_rsrc->id; 4996 } 4997 4998 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG; 4999 rsrc_id_cnt = rsrc_cnt * rsrc_size; 5000 5001 /* 5002 * Based on the resource size and count, correct the base and max 5003 * resource values. 5004 */ 5005 length = sizeof(struct lpfc_rsrc_blks); 5006 switch (type) { 5007 case LPFC_RSC_TYPE_FCOE_RPI: 5008 phba->sli4_hba.rpi_bmask = kzalloc(longs * 5009 sizeof(unsigned long), 5010 GFP_KERNEL); 5011 if (unlikely(!phba->sli4_hba.rpi_bmask)) { 5012 rc = -ENOMEM; 5013 goto err_exit; 5014 } 5015 phba->sli4_hba.rpi_ids = kzalloc(rsrc_id_cnt * 5016 sizeof(uint16_t), 5017 GFP_KERNEL); 5018 if (unlikely(!phba->sli4_hba.rpi_ids)) { 5019 kfree(phba->sli4_hba.rpi_bmask); 5020 rc = -ENOMEM; 5021 goto err_exit; 5022 } 5023 5024 /* 5025 * The next_rpi was initialized with the maximum available 5026 * count but the port may allocate a smaller number. Catch 5027 * that case and update the next_rpi. 5028 */ 5029 phba->sli4_hba.next_rpi = rsrc_id_cnt; 5030 5031 /* Initialize local ptrs for common extent processing later. */ 5032 bmask = phba->sli4_hba.rpi_bmask; 5033 ids = phba->sli4_hba.rpi_ids; 5034 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list; 5035 break; 5036 case LPFC_RSC_TYPE_FCOE_VPI: 5037 phba->vpi_bmask = kzalloc(longs * 5038 sizeof(unsigned long), 5039 GFP_KERNEL); 5040 if (unlikely(!phba->vpi_bmask)) { 5041 rc = -ENOMEM; 5042 goto err_exit; 5043 } 5044 phba->vpi_ids = kzalloc(rsrc_id_cnt * 5045 sizeof(uint16_t), 5046 GFP_KERNEL); 5047 if (unlikely(!phba->vpi_ids)) { 5048 kfree(phba->vpi_bmask); 5049 rc = -ENOMEM; 5050 goto err_exit; 5051 } 5052 5053 /* Initialize local ptrs for common extent processing later. */ 5054 bmask = phba->vpi_bmask; 5055 ids = phba->vpi_ids; 5056 ext_blk_list = &phba->lpfc_vpi_blk_list; 5057 break; 5058 case LPFC_RSC_TYPE_FCOE_XRI: 5059 phba->sli4_hba.xri_bmask = kzalloc(longs * 5060 sizeof(unsigned long), 5061 GFP_KERNEL); 5062 if (unlikely(!phba->sli4_hba.xri_bmask)) { 5063 rc = -ENOMEM; 5064 goto err_exit; 5065 } 5066 phba->sli4_hba.xri_ids = kzalloc(rsrc_id_cnt * 5067 sizeof(uint16_t), 5068 GFP_KERNEL); 5069 if (unlikely(!phba->sli4_hba.xri_ids)) { 5070 kfree(phba->sli4_hba.xri_bmask); 5071 rc = -ENOMEM; 5072 goto err_exit; 5073 } 5074 5075 /* Initialize local ptrs for common extent processing later. */ 5076 bmask = phba->sli4_hba.xri_bmask; 5077 ids = phba->sli4_hba.xri_ids; 5078 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list; 5079 break; 5080 case LPFC_RSC_TYPE_FCOE_VFI: 5081 phba->sli4_hba.vfi_bmask = kzalloc(longs * 5082 sizeof(unsigned long), 5083 GFP_KERNEL); 5084 if (unlikely(!phba->sli4_hba.vfi_bmask)) { 5085 rc = -ENOMEM; 5086 goto err_exit; 5087 } 5088 phba->sli4_hba.vfi_ids = kzalloc(rsrc_id_cnt * 5089 sizeof(uint16_t), 5090 GFP_KERNEL); 5091 if (unlikely(!phba->sli4_hba.vfi_ids)) { 5092 kfree(phba->sli4_hba.vfi_bmask); 5093 rc = -ENOMEM; 5094 goto err_exit; 5095 } 5096 5097 /* Initialize local ptrs for common extent processing later. */ 5098 bmask = phba->sli4_hba.vfi_bmask; 5099 ids = phba->sli4_hba.vfi_ids; 5100 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list; 5101 break; 5102 default: 5103 /* Unsupported Opcode. Fail call. */ 5104 id_array = NULL; 5105 bmask = NULL; 5106 ids = NULL; 5107 ext_blk_list = NULL; 5108 goto err_exit; 5109 } 5110 5111 /* 5112 * Complete initializing the extent configuration with the 5113 * allocated ids assigned to this function. The bitmask serves 5114 * as an index into the array and manages the available ids. The 5115 * array just stores the ids communicated to the port via the wqes. 5116 */ 5117 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) { 5118 if ((i % 2) == 0) 5119 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0, 5120 &id_array[k]); 5121 else 5122 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1, 5123 &id_array[k]); 5124 5125 rsrc_blks = kzalloc(length, GFP_KERNEL); 5126 if (unlikely(!rsrc_blks)) { 5127 rc = -ENOMEM; 5128 kfree(bmask); 5129 kfree(ids); 5130 goto err_exit; 5131 } 5132 rsrc_blks->rsrc_start = rsrc_id; 5133 rsrc_blks->rsrc_size = rsrc_size; 5134 list_add_tail(&rsrc_blks->list, ext_blk_list); 5135 rsrc_start = rsrc_id; 5136 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) 5137 phba->sli4_hba.scsi_xri_start = rsrc_start + 5138 lpfc_sli4_get_els_iocb_cnt(phba); 5139 5140 while (rsrc_id < (rsrc_start + rsrc_size)) { 5141 ids[j] = rsrc_id; 5142 rsrc_id++; 5143 j++; 5144 } 5145 /* Entire word processed. Get next word.*/ 5146 if ((i % 2) == 1) 5147 k++; 5148 } 5149 err_exit: 5150 lpfc_sli4_mbox_cmd_free(phba, mbox); 5151 return rc; 5152 } 5153 5154 /** 5155 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent. 5156 * @phba: Pointer to HBA context object. 5157 * @type: the extent's type. 5158 * 5159 * This function deallocates all extents of a particular resource type. 5160 * SLI4 does not allow for deallocating a particular extent range. It 5161 * is the caller's responsibility to release all kernel memory resources. 5162 **/ 5163 static int 5164 lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type) 5165 { 5166 int rc; 5167 uint32_t length, mbox_tmo = 0; 5168 LPFC_MBOXQ_t *mbox; 5169 struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc; 5170 struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next; 5171 5172 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5173 if (!mbox) 5174 return -ENOMEM; 5175 5176 /* 5177 * This function sends an embedded mailbox because it only sends the 5178 * the resource type. All extents of this type are released by the 5179 * port. 5180 */ 5181 length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) - 5182 sizeof(struct lpfc_sli4_cfg_mhdr)); 5183 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 5184 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT, 5185 length, LPFC_SLI4_MBX_EMBED); 5186 5187 /* Send an extents count of 0 - the dealloc doesn't use it. */ 5188 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type, 5189 LPFC_SLI4_MBX_EMBED); 5190 if (unlikely(rc)) { 5191 rc = -EIO; 5192 goto out_free_mbox; 5193 } 5194 if (!phba->sli4_hba.intr_enable) 5195 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 5196 else { 5197 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox_tmo); 5198 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 5199 } 5200 if (unlikely(rc)) { 5201 rc = -EIO; 5202 goto out_free_mbox; 5203 } 5204 5205 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents; 5206 if (bf_get(lpfc_mbox_hdr_status, 5207 &dealloc_rsrc->header.cfg_shdr.response)) { 5208 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 5209 "2919 Failed to release resource extents " 5210 "for type %d - Status 0x%x Add'l Status 0x%x. " 5211 "Resource memory not released.\n", 5212 type, 5213 bf_get(lpfc_mbox_hdr_status, 5214 &dealloc_rsrc->header.cfg_shdr.response), 5215 bf_get(lpfc_mbox_hdr_add_status, 5216 &dealloc_rsrc->header.cfg_shdr.response)); 5217 rc = -EIO; 5218 goto out_free_mbox; 5219 } 5220 5221 /* Release kernel memory resources for the specific type. */ 5222 switch (type) { 5223 case LPFC_RSC_TYPE_FCOE_VPI: 5224 kfree(phba->vpi_bmask); 5225 kfree(phba->vpi_ids); 5226 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 5227 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 5228 &phba->lpfc_vpi_blk_list, list) { 5229 list_del_init(&rsrc_blk->list); 5230 kfree(rsrc_blk); 5231 } 5232 break; 5233 case LPFC_RSC_TYPE_FCOE_XRI: 5234 kfree(phba->sli4_hba.xri_bmask); 5235 kfree(phba->sli4_hba.xri_ids); 5236 bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 5237 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 5238 &phba->sli4_hba.lpfc_xri_blk_list, list) { 5239 list_del_init(&rsrc_blk->list); 5240 kfree(rsrc_blk); 5241 } 5242 break; 5243 case LPFC_RSC_TYPE_FCOE_VFI: 5244 kfree(phba->sli4_hba.vfi_bmask); 5245 kfree(phba->sli4_hba.vfi_ids); 5246 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 5247 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 5248 &phba->sli4_hba.lpfc_vfi_blk_list, list) { 5249 list_del_init(&rsrc_blk->list); 5250 kfree(rsrc_blk); 5251 } 5252 break; 5253 case LPFC_RSC_TYPE_FCOE_RPI: 5254 /* RPI bitmask and physical id array are cleaned up earlier. */ 5255 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 5256 &phba->sli4_hba.lpfc_rpi_blk_list, list) { 5257 list_del_init(&rsrc_blk->list); 5258 kfree(rsrc_blk); 5259 } 5260 break; 5261 default: 5262 break; 5263 } 5264 5265 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 5266 5267 out_free_mbox: 5268 mempool_free(mbox, phba->mbox_mem_pool); 5269 return rc; 5270 } 5271 5272 /** 5273 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents. 5274 * @phba: Pointer to HBA context object. 5275 * 5276 * This function allocates all SLI4 resource identifiers. 5277 **/ 5278 int 5279 lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba) 5280 { 5281 int i, rc, error = 0; 5282 uint16_t count, base; 5283 unsigned long longs; 5284 5285 if (phba->sli4_hba.extents_in_use) { 5286 /* 5287 * The port supports resource extents. The XRI, VPI, VFI, RPI 5288 * resource extent count must be read and allocated before 5289 * provisioning the resource id arrays. 5290 */ 5291 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) == 5292 LPFC_IDX_RSRC_RDY) { 5293 /* 5294 * Extent-based resources are set - the driver could 5295 * be in a port reset. Figure out if any corrective 5296 * actions need to be taken. 5297 */ 5298 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 5299 LPFC_RSC_TYPE_FCOE_VFI); 5300 if (rc != 0) 5301 error++; 5302 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 5303 LPFC_RSC_TYPE_FCOE_VPI); 5304 if (rc != 0) 5305 error++; 5306 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 5307 LPFC_RSC_TYPE_FCOE_XRI); 5308 if (rc != 0) 5309 error++; 5310 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 5311 LPFC_RSC_TYPE_FCOE_RPI); 5312 if (rc != 0) 5313 error++; 5314 5315 /* 5316 * It's possible that the number of resources 5317 * provided to this port instance changed between 5318 * resets. Detect this condition and reallocate 5319 * resources. Otherwise, there is no action. 5320 */ 5321 if (error) { 5322 lpfc_printf_log(phba, KERN_INFO, 5323 LOG_MBOX | LOG_INIT, 5324 "2931 Detected extent resource " 5325 "change. Reallocating all " 5326 "extents.\n"); 5327 rc = lpfc_sli4_dealloc_extent(phba, 5328 LPFC_RSC_TYPE_FCOE_VFI); 5329 rc = lpfc_sli4_dealloc_extent(phba, 5330 LPFC_RSC_TYPE_FCOE_VPI); 5331 rc = lpfc_sli4_dealloc_extent(phba, 5332 LPFC_RSC_TYPE_FCOE_XRI); 5333 rc = lpfc_sli4_dealloc_extent(phba, 5334 LPFC_RSC_TYPE_FCOE_RPI); 5335 } else 5336 return 0; 5337 } 5338 5339 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI); 5340 if (unlikely(rc)) 5341 goto err_exit; 5342 5343 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI); 5344 if (unlikely(rc)) 5345 goto err_exit; 5346 5347 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI); 5348 if (unlikely(rc)) 5349 goto err_exit; 5350 5351 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI); 5352 if (unlikely(rc)) 5353 goto err_exit; 5354 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 5355 LPFC_IDX_RSRC_RDY); 5356 return rc; 5357 } else { 5358 /* 5359 * The port does not support resource extents. The XRI, VPI, 5360 * VFI, RPI resource ids were determined from READ_CONFIG. 5361 * Just allocate the bitmasks and provision the resource id 5362 * arrays. If a port reset is active, the resources don't 5363 * need any action - just exit. 5364 */ 5365 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) == 5366 LPFC_IDX_RSRC_RDY) 5367 return 0; 5368 5369 /* RPIs. */ 5370 count = phba->sli4_hba.max_cfg_param.max_rpi; 5371 base = phba->sli4_hba.max_cfg_param.rpi_base; 5372 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 5373 phba->sli4_hba.rpi_bmask = kzalloc(longs * 5374 sizeof(unsigned long), 5375 GFP_KERNEL); 5376 if (unlikely(!phba->sli4_hba.rpi_bmask)) { 5377 rc = -ENOMEM; 5378 goto err_exit; 5379 } 5380 phba->sli4_hba.rpi_ids = kzalloc(count * 5381 sizeof(uint16_t), 5382 GFP_KERNEL); 5383 if (unlikely(!phba->sli4_hba.rpi_ids)) { 5384 rc = -ENOMEM; 5385 goto free_rpi_bmask; 5386 } 5387 5388 for (i = 0; i < count; i++) 5389 phba->sli4_hba.rpi_ids[i] = base + i; 5390 5391 /* VPIs. */ 5392 count = phba->sli4_hba.max_cfg_param.max_vpi; 5393 base = phba->sli4_hba.max_cfg_param.vpi_base; 5394 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 5395 phba->vpi_bmask = kzalloc(longs * 5396 sizeof(unsigned long), 5397 GFP_KERNEL); 5398 if (unlikely(!phba->vpi_bmask)) { 5399 rc = -ENOMEM; 5400 goto free_rpi_ids; 5401 } 5402 phba->vpi_ids = kzalloc(count * 5403 sizeof(uint16_t), 5404 GFP_KERNEL); 5405 if (unlikely(!phba->vpi_ids)) { 5406 rc = -ENOMEM; 5407 goto free_vpi_bmask; 5408 } 5409 5410 for (i = 0; i < count; i++) 5411 phba->vpi_ids[i] = base + i; 5412 5413 /* XRIs. */ 5414 count = phba->sli4_hba.max_cfg_param.max_xri; 5415 base = phba->sli4_hba.max_cfg_param.xri_base; 5416 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 5417 phba->sli4_hba.xri_bmask = kzalloc(longs * 5418 sizeof(unsigned long), 5419 GFP_KERNEL); 5420 if (unlikely(!phba->sli4_hba.xri_bmask)) { 5421 rc = -ENOMEM; 5422 goto free_vpi_ids; 5423 } 5424 phba->sli4_hba.xri_ids = kzalloc(count * 5425 sizeof(uint16_t), 5426 GFP_KERNEL); 5427 if (unlikely(!phba->sli4_hba.xri_ids)) { 5428 rc = -ENOMEM; 5429 goto free_xri_bmask; 5430 } 5431 5432 for (i = 0; i < count; i++) 5433 phba->sli4_hba.xri_ids[i] = base + i; 5434 5435 /* VFIs. */ 5436 count = phba->sli4_hba.max_cfg_param.max_vfi; 5437 base = phba->sli4_hba.max_cfg_param.vfi_base; 5438 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 5439 phba->sli4_hba.vfi_bmask = kzalloc(longs * 5440 sizeof(unsigned long), 5441 GFP_KERNEL); 5442 if (unlikely(!phba->sli4_hba.vfi_bmask)) { 5443 rc = -ENOMEM; 5444 goto free_xri_ids; 5445 } 5446 phba->sli4_hba.vfi_ids = kzalloc(count * 5447 sizeof(uint16_t), 5448 GFP_KERNEL); 5449 if (unlikely(!phba->sli4_hba.vfi_ids)) { 5450 rc = -ENOMEM; 5451 goto free_vfi_bmask; 5452 } 5453 5454 for (i = 0; i < count; i++) 5455 phba->sli4_hba.vfi_ids[i] = base + i; 5456 5457 /* 5458 * Mark all resources ready. An HBA reset doesn't need 5459 * to reset the initialization. 5460 */ 5461 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 5462 LPFC_IDX_RSRC_RDY); 5463 return 0; 5464 } 5465 5466 free_vfi_bmask: 5467 kfree(phba->sli4_hba.vfi_bmask); 5468 free_xri_ids: 5469 kfree(phba->sli4_hba.xri_ids); 5470 free_xri_bmask: 5471 kfree(phba->sli4_hba.xri_bmask); 5472 free_vpi_ids: 5473 kfree(phba->vpi_ids); 5474 free_vpi_bmask: 5475 kfree(phba->vpi_bmask); 5476 free_rpi_ids: 5477 kfree(phba->sli4_hba.rpi_ids); 5478 free_rpi_bmask: 5479 kfree(phba->sli4_hba.rpi_bmask); 5480 err_exit: 5481 return rc; 5482 } 5483 5484 /** 5485 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents. 5486 * @phba: Pointer to HBA context object. 5487 * 5488 * This function allocates the number of elements for the specified 5489 * resource type. 5490 **/ 5491 int 5492 lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba) 5493 { 5494 if (phba->sli4_hba.extents_in_use) { 5495 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI); 5496 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI); 5497 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI); 5498 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI); 5499 } else { 5500 kfree(phba->vpi_bmask); 5501 kfree(phba->vpi_ids); 5502 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 5503 kfree(phba->sli4_hba.xri_bmask); 5504 kfree(phba->sli4_hba.xri_ids); 5505 bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 5506 kfree(phba->sli4_hba.vfi_bmask); 5507 kfree(phba->sli4_hba.vfi_ids); 5508 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 5509 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 5510 } 5511 5512 return 0; 5513 } 5514 5515 /** 5516 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents. 5517 * @phba: Pointer to HBA context object. 5518 * @type: The resource extent type. 5519 * @extnt_count: buffer to hold port extent count response 5520 * @extnt_size: buffer to hold port extent size response. 5521 * 5522 * This function calls the port to read the host allocated extents 5523 * for a particular type. 5524 **/ 5525 int 5526 lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type, 5527 uint16_t *extnt_cnt, uint16_t *extnt_size) 5528 { 5529 bool emb; 5530 int rc = 0; 5531 uint16_t curr_blks = 0; 5532 uint32_t req_len, emb_len; 5533 uint32_t alloc_len, mbox_tmo; 5534 struct list_head *blk_list_head; 5535 struct lpfc_rsrc_blks *rsrc_blk; 5536 LPFC_MBOXQ_t *mbox; 5537 void *virtaddr = NULL; 5538 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc; 5539 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext; 5540 union lpfc_sli4_cfg_shdr *shdr; 5541 5542 switch (type) { 5543 case LPFC_RSC_TYPE_FCOE_VPI: 5544 blk_list_head = &phba->lpfc_vpi_blk_list; 5545 break; 5546 case LPFC_RSC_TYPE_FCOE_XRI: 5547 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list; 5548 break; 5549 case LPFC_RSC_TYPE_FCOE_VFI: 5550 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list; 5551 break; 5552 case LPFC_RSC_TYPE_FCOE_RPI: 5553 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list; 5554 break; 5555 default: 5556 return -EIO; 5557 } 5558 5559 /* Count the number of extents currently allocatd for this type. */ 5560 list_for_each_entry(rsrc_blk, blk_list_head, list) { 5561 if (curr_blks == 0) { 5562 /* 5563 * The GET_ALLOCATED mailbox does not return the size, 5564 * just the count. The size should be just the size 5565 * stored in the current allocated block and all sizes 5566 * for an extent type are the same so set the return 5567 * value now. 5568 */ 5569 *extnt_size = rsrc_blk->rsrc_size; 5570 } 5571 curr_blks++; 5572 } 5573 5574 /* Calculate the total requested length of the dma memory. */ 5575 req_len = curr_blks * sizeof(uint16_t); 5576 5577 /* 5578 * Calculate the size of an embedded mailbox. The uint32_t 5579 * accounts for extents-specific word. 5580 */ 5581 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) - 5582 sizeof(uint32_t); 5583 5584 /* 5585 * Presume the allocation and response will fit into an embedded 5586 * mailbox. If not true, reconfigure to a non-embedded mailbox. 5587 */ 5588 emb = LPFC_SLI4_MBX_EMBED; 5589 req_len = emb_len; 5590 if (req_len > emb_len) { 5591 req_len = curr_blks * sizeof(uint16_t) + 5592 sizeof(union lpfc_sli4_cfg_shdr) + 5593 sizeof(uint32_t); 5594 emb = LPFC_SLI4_MBX_NEMBED; 5595 } 5596 5597 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5598 if (!mbox) 5599 return -ENOMEM; 5600 memset(mbox, 0, sizeof(LPFC_MBOXQ_t)); 5601 5602 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 5603 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT, 5604 req_len, emb); 5605 if (alloc_len < req_len) { 5606 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5607 "2983 Allocated DMA memory size (x%x) is " 5608 "less than the requested DMA memory " 5609 "size (x%x)\n", alloc_len, req_len); 5610 rc = -ENOMEM; 5611 goto err_exit; 5612 } 5613 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb); 5614 if (unlikely(rc)) { 5615 rc = -EIO; 5616 goto err_exit; 5617 } 5618 5619 if (!phba->sli4_hba.intr_enable) 5620 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 5621 else { 5622 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); 5623 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 5624 } 5625 5626 if (unlikely(rc)) { 5627 rc = -EIO; 5628 goto err_exit; 5629 } 5630 5631 /* 5632 * Figure out where the response is located. Then get local pointers 5633 * to the response data. The port does not guarantee to respond to 5634 * all extents counts request so update the local variable with the 5635 * allocated count from the port. 5636 */ 5637 if (emb == LPFC_SLI4_MBX_EMBED) { 5638 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents; 5639 shdr = &rsrc_ext->header.cfg_shdr; 5640 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp); 5641 } else { 5642 virtaddr = mbox->sge_array->addr[0]; 5643 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr; 5644 shdr = &n_rsrc->cfg_shdr; 5645 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc); 5646 } 5647 5648 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) { 5649 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 5650 "2984 Failed to read allocated resources " 5651 "for type %d - Status 0x%x Add'l Status 0x%x.\n", 5652 type, 5653 bf_get(lpfc_mbox_hdr_status, &shdr->response), 5654 bf_get(lpfc_mbox_hdr_add_status, &shdr->response)); 5655 rc = -EIO; 5656 goto err_exit; 5657 } 5658 err_exit: 5659 lpfc_sli4_mbox_cmd_free(phba, mbox); 5660 return rc; 5661 } 5662 5663 /** 5664 * lpfc_sli4_hba_setup - SLI4 device intialization PCI function 5665 * @phba: Pointer to HBA context object. 5666 * 5667 * This function is the main SLI4 device intialization PCI function. This 5668 * function is called by the HBA intialization code, HBA reset code and 5669 * HBA error attention handler code. Caller is not required to hold any 5670 * locks. 5671 **/ 5672 int 5673 lpfc_sli4_hba_setup(struct lpfc_hba *phba) 5674 { 5675 int rc; 5676 LPFC_MBOXQ_t *mboxq; 5677 struct lpfc_mqe *mqe; 5678 uint8_t *vpd; 5679 uint32_t vpd_size; 5680 uint32_t ftr_rsp = 0; 5681 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport); 5682 struct lpfc_vport *vport = phba->pport; 5683 struct lpfc_dmabuf *mp; 5684 5685 /* Perform a PCI function reset to start from clean */ 5686 rc = lpfc_pci_function_reset(phba); 5687 if (unlikely(rc)) 5688 return -ENODEV; 5689 5690 /* Check the HBA Host Status Register for readyness */ 5691 rc = lpfc_sli4_post_status_check(phba); 5692 if (unlikely(rc)) 5693 return -ENODEV; 5694 else { 5695 spin_lock_irq(&phba->hbalock); 5696 phba->sli.sli_flag |= LPFC_SLI_ACTIVE; 5697 spin_unlock_irq(&phba->hbalock); 5698 } 5699 5700 /* 5701 * Allocate a single mailbox container for initializing the 5702 * port. 5703 */ 5704 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5705 if (!mboxq) 5706 return -ENOMEM; 5707 5708 /* 5709 * Continue initialization with default values even if driver failed 5710 * to read FCoE param config regions 5711 */ 5712 if (lpfc_sli4_read_fcoe_params(phba, mboxq)) 5713 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT, 5714 "2570 Failed to read FCoE parameters\n"); 5715 5716 /* Issue READ_REV to collect vpd and FW information. */ 5717 vpd_size = SLI4_PAGE_SIZE; 5718 vpd = kzalloc(vpd_size, GFP_KERNEL); 5719 if (!vpd) { 5720 rc = -ENOMEM; 5721 goto out_free_mbox; 5722 } 5723 5724 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size); 5725 if (unlikely(rc)) { 5726 kfree(vpd); 5727 goto out_free_mbox; 5728 } 5729 mqe = &mboxq->u.mqe; 5730 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev); 5731 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) 5732 phba->hba_flag |= HBA_FCOE_MODE; 5733 else 5734 phba->hba_flag &= ~HBA_FCOE_MODE; 5735 5736 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) == 5737 LPFC_DCBX_CEE_MODE) 5738 phba->hba_flag |= HBA_FIP_SUPPORT; 5739 else 5740 phba->hba_flag &= ~HBA_FIP_SUPPORT; 5741 5742 if (phba->sli_rev != LPFC_SLI_REV4) { 5743 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 5744 "0376 READ_REV Error. SLI Level %d " 5745 "FCoE enabled %d\n", 5746 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE); 5747 rc = -EIO; 5748 kfree(vpd); 5749 goto out_free_mbox; 5750 } 5751 /* 5752 * Evaluate the read rev and vpd data. Populate the driver 5753 * state with the results. If this routine fails, the failure 5754 * is not fatal as the driver will use generic values. 5755 */ 5756 rc = lpfc_parse_vpd(phba, vpd, vpd_size); 5757 if (unlikely(!rc)) { 5758 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 5759 "0377 Error %d parsing vpd. " 5760 "Using defaults.\n", rc); 5761 rc = 0; 5762 } 5763 kfree(vpd); 5764 5765 /* Save information as VPD data */ 5766 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev; 5767 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev; 5768 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev; 5769 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high, 5770 &mqe->un.read_rev); 5771 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low, 5772 &mqe->un.read_rev); 5773 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high, 5774 &mqe->un.read_rev); 5775 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low, 5776 &mqe->un.read_rev); 5777 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev; 5778 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16); 5779 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev; 5780 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16); 5781 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev; 5782 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16); 5783 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 5784 "(%d):0380 READ_REV Status x%x " 5785 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n", 5786 mboxq->vport ? mboxq->vport->vpi : 0, 5787 bf_get(lpfc_mqe_status, mqe), 5788 phba->vpd.rev.opFwName, 5789 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow, 5790 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow); 5791 5792 /* 5793 * Discover the port's supported feature set and match it against the 5794 * hosts requests. 5795 */ 5796 lpfc_request_features(phba, mboxq); 5797 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5798 if (unlikely(rc)) { 5799 rc = -EIO; 5800 goto out_free_mbox; 5801 } 5802 5803 /* 5804 * The port must support FCP initiator mode as this is the 5805 * only mode running in the host. 5806 */ 5807 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) { 5808 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 5809 "0378 No support for fcpi mode.\n"); 5810 ftr_rsp++; 5811 } 5812 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs)) 5813 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED; 5814 else 5815 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED; 5816 /* 5817 * If the port cannot support the host's requested features 5818 * then turn off the global config parameters to disable the 5819 * feature in the driver. This is not a fatal error. 5820 */ 5821 if ((phba->cfg_enable_bg) && 5822 !(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) 5823 ftr_rsp++; 5824 5825 if (phba->max_vpi && phba->cfg_enable_npiv && 5826 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) 5827 ftr_rsp++; 5828 5829 if (ftr_rsp) { 5830 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 5831 "0379 Feature Mismatch Data: x%08x %08x " 5832 "x%x x%x x%x\n", mqe->un.req_ftrs.word2, 5833 mqe->un.req_ftrs.word3, phba->cfg_enable_bg, 5834 phba->cfg_enable_npiv, phba->max_vpi); 5835 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) 5836 phba->cfg_enable_bg = 0; 5837 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) 5838 phba->cfg_enable_npiv = 0; 5839 } 5840 5841 /* These SLI3 features are assumed in SLI4 */ 5842 spin_lock_irq(&phba->hbalock); 5843 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED); 5844 spin_unlock_irq(&phba->hbalock); 5845 5846 /* 5847 * Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent 5848 * calls depends on these resources to complete port setup. 5849 */ 5850 rc = lpfc_sli4_alloc_resource_identifiers(phba); 5851 if (rc) { 5852 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 5853 "2920 Failed to alloc Resource IDs " 5854 "rc = x%x\n", rc); 5855 goto out_free_mbox; 5856 } 5857 5858 /* Read the port's service parameters. */ 5859 rc = lpfc_read_sparam(phba, mboxq, vport->vpi); 5860 if (rc) { 5861 phba->link_state = LPFC_HBA_ERROR; 5862 rc = -ENOMEM; 5863 goto out_free_mbox; 5864 } 5865 5866 mboxq->vport = vport; 5867 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5868 mp = (struct lpfc_dmabuf *) mboxq->context1; 5869 if (rc == MBX_SUCCESS) { 5870 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm)); 5871 rc = 0; 5872 } 5873 5874 /* 5875 * This memory was allocated by the lpfc_read_sparam routine. Release 5876 * it to the mbuf pool. 5877 */ 5878 lpfc_mbuf_free(phba, mp->virt, mp->phys); 5879 kfree(mp); 5880 mboxq->context1 = NULL; 5881 if (unlikely(rc)) { 5882 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 5883 "0382 READ_SPARAM command failed " 5884 "status %d, mbxStatus x%x\n", 5885 rc, bf_get(lpfc_mqe_status, mqe)); 5886 phba->link_state = LPFC_HBA_ERROR; 5887 rc = -EIO; 5888 goto out_free_mbox; 5889 } 5890 5891 lpfc_update_vport_wwn(vport); 5892 5893 /* Update the fc_host data structures with new wwn. */ 5894 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 5895 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 5896 5897 /* Register SGL pool to the device using non-embedded mailbox command */ 5898 if (!phba->sli4_hba.extents_in_use) { 5899 rc = lpfc_sli4_post_els_sgl_list(phba); 5900 if (unlikely(rc)) { 5901 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 5902 "0582 Error %d during els sgl post " 5903 "operation\n", rc); 5904 rc = -ENODEV; 5905 goto out_free_mbox; 5906 } 5907 } else { 5908 rc = lpfc_sli4_post_els_sgl_list_ext(phba); 5909 if (unlikely(rc)) { 5910 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 5911 "2560 Error %d during els sgl post " 5912 "operation\n", rc); 5913 rc = -ENODEV; 5914 goto out_free_mbox; 5915 } 5916 } 5917 5918 /* Register SCSI SGL pool to the device */ 5919 rc = lpfc_sli4_repost_scsi_sgl_list(phba); 5920 if (unlikely(rc)) { 5921 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 5922 "0383 Error %d during scsi sgl post " 5923 "operation\n", rc); 5924 /* Some Scsi buffers were moved to the abort scsi list */ 5925 /* A pci function reset will repost them */ 5926 rc = -ENODEV; 5927 goto out_free_mbox; 5928 } 5929 5930 /* Post the rpi header region to the device. */ 5931 rc = lpfc_sli4_post_all_rpi_hdrs(phba); 5932 if (unlikely(rc)) { 5933 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 5934 "0393 Error %d during rpi post operation\n", 5935 rc); 5936 rc = -ENODEV; 5937 goto out_free_mbox; 5938 } 5939 5940 /* Set up all the queues to the device */ 5941 rc = lpfc_sli4_queue_setup(phba); 5942 if (unlikely(rc)) { 5943 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 5944 "0381 Error %d during queue setup.\n ", rc); 5945 goto out_stop_timers; 5946 } 5947 5948 /* Arm the CQs and then EQs on device */ 5949 lpfc_sli4_arm_cqeq_intr(phba); 5950 5951 /* Indicate device interrupt mode */ 5952 phba->sli4_hba.intr_enable = 1; 5953 5954 /* Allow asynchronous mailbox command to go through */ 5955 spin_lock_irq(&phba->hbalock); 5956 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 5957 spin_unlock_irq(&phba->hbalock); 5958 5959 /* Post receive buffers to the device */ 5960 lpfc_sli4_rb_setup(phba); 5961 5962 /* Reset HBA FCF states after HBA reset */ 5963 phba->fcf.fcf_flag = 0; 5964 phba->fcf.current_rec.flag = 0; 5965 5966 /* Start the ELS watchdog timer */ 5967 mod_timer(&vport->els_tmofunc, 5968 jiffies + HZ * (phba->fc_ratov * 2)); 5969 5970 /* Start heart beat timer */ 5971 mod_timer(&phba->hb_tmofunc, 5972 jiffies + HZ * LPFC_HB_MBOX_INTERVAL); 5973 phba->hb_outstanding = 0; 5974 phba->last_completion_time = jiffies; 5975 5976 /* Start error attention (ERATT) polling timer */ 5977 mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL); 5978 5979 /* Enable PCIe device Advanced Error Reporting (AER) if configured */ 5980 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) { 5981 rc = pci_enable_pcie_error_reporting(phba->pcidev); 5982 if (!rc) { 5983 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5984 "2829 This device supports " 5985 "Advanced Error Reporting (AER)\n"); 5986 spin_lock_irq(&phba->hbalock); 5987 phba->hba_flag |= HBA_AER_ENABLED; 5988 spin_unlock_irq(&phba->hbalock); 5989 } else { 5990 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5991 "2830 This device does not support " 5992 "Advanced Error Reporting (AER)\n"); 5993 phba->cfg_aer_support = 0; 5994 } 5995 rc = 0; 5996 } 5997 5998 if (!(phba->hba_flag & HBA_FCOE_MODE)) { 5999 /* 6000 * The FC Port needs to register FCFI (index 0) 6001 */ 6002 lpfc_reg_fcfi(phba, mboxq); 6003 mboxq->vport = phba->pport; 6004 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6005 if (rc != MBX_SUCCESS) 6006 goto out_unset_queue; 6007 rc = 0; 6008 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, 6009 &mboxq->u.mqe.un.reg_fcfi); 6010 } 6011 /* 6012 * The port is ready, set the host's link state to LINK_DOWN 6013 * in preparation for link interrupts. 6014 */ 6015 spin_lock_irq(&phba->hbalock); 6016 phba->link_state = LPFC_LINK_DOWN; 6017 spin_unlock_irq(&phba->hbalock); 6018 if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) 6019 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); 6020 out_unset_queue: 6021 /* Unset all the queues set up in this routine when error out */ 6022 if (rc) 6023 lpfc_sli4_queue_unset(phba); 6024 out_stop_timers: 6025 if (rc) 6026 lpfc_stop_hba_timers(phba); 6027 out_free_mbox: 6028 mempool_free(mboxq, phba->mbox_mem_pool); 6029 return rc; 6030 } 6031 6032 /** 6033 * lpfc_mbox_timeout - Timeout call back function for mbox timer 6034 * @ptr: context object - pointer to hba structure. 6035 * 6036 * This is the callback function for mailbox timer. The mailbox 6037 * timer is armed when a new mailbox command is issued and the timer 6038 * is deleted when the mailbox complete. The function is called by 6039 * the kernel timer code when a mailbox does not complete within 6040 * expected time. This function wakes up the worker thread to 6041 * process the mailbox timeout and returns. All the processing is 6042 * done by the worker thread function lpfc_mbox_timeout_handler. 6043 **/ 6044 void 6045 lpfc_mbox_timeout(unsigned long ptr) 6046 { 6047 struct lpfc_hba *phba = (struct lpfc_hba *) ptr; 6048 unsigned long iflag; 6049 uint32_t tmo_posted; 6050 6051 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 6052 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO; 6053 if (!tmo_posted) 6054 phba->pport->work_port_events |= WORKER_MBOX_TMO; 6055 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 6056 6057 if (!tmo_posted) 6058 lpfc_worker_wake_up(phba); 6059 return; 6060 } 6061 6062 6063 /** 6064 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout 6065 * @phba: Pointer to HBA context object. 6066 * 6067 * This function is called from worker thread when a mailbox command times out. 6068 * The caller is not required to hold any locks. This function will reset the 6069 * HBA and recover all the pending commands. 6070 **/ 6071 void 6072 lpfc_mbox_timeout_handler(struct lpfc_hba *phba) 6073 { 6074 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active; 6075 MAILBOX_t *mb = &pmbox->u.mb; 6076 struct lpfc_sli *psli = &phba->sli; 6077 struct lpfc_sli_ring *pring; 6078 6079 /* Check the pmbox pointer first. There is a race condition 6080 * between the mbox timeout handler getting executed in the 6081 * worklist and the mailbox actually completing. When this 6082 * race condition occurs, the mbox_active will be NULL. 6083 */ 6084 spin_lock_irq(&phba->hbalock); 6085 if (pmbox == NULL) { 6086 lpfc_printf_log(phba, KERN_WARNING, 6087 LOG_MBOX | LOG_SLI, 6088 "0353 Active Mailbox cleared - mailbox timeout " 6089 "exiting\n"); 6090 spin_unlock_irq(&phba->hbalock); 6091 return; 6092 } 6093 6094 /* Mbox cmd <mbxCommand> timeout */ 6095 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6096 "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n", 6097 mb->mbxCommand, 6098 phba->pport->port_state, 6099 phba->sli.sli_flag, 6100 phba->sli.mbox_active); 6101 spin_unlock_irq(&phba->hbalock); 6102 6103 /* Setting state unknown so lpfc_sli_abort_iocb_ring 6104 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing 6105 * it to fail all outstanding SCSI IO. 6106 */ 6107 spin_lock_irq(&phba->pport->work_port_lock); 6108 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 6109 spin_unlock_irq(&phba->pport->work_port_lock); 6110 spin_lock_irq(&phba->hbalock); 6111 phba->link_state = LPFC_LINK_UNKNOWN; 6112 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 6113 spin_unlock_irq(&phba->hbalock); 6114 6115 pring = &psli->ring[psli->fcp_ring]; 6116 lpfc_sli_abort_iocb_ring(phba, pring); 6117 6118 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6119 "0345 Resetting board due to mailbox timeout\n"); 6120 6121 /* Reset the HBA device */ 6122 lpfc_reset_hba(phba); 6123 } 6124 6125 /** 6126 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware 6127 * @phba: Pointer to HBA context object. 6128 * @pmbox: Pointer to mailbox object. 6129 * @flag: Flag indicating how the mailbox need to be processed. 6130 * 6131 * This function is called by discovery code and HBA management code 6132 * to submit a mailbox command to firmware with SLI-3 interface spec. This 6133 * function gets the hbalock to protect the data structures. 6134 * The mailbox command can be submitted in polling mode, in which case 6135 * this function will wait in a polling loop for the completion of the 6136 * mailbox. 6137 * If the mailbox is submitted in no_wait mode (not polling) the 6138 * function will submit the command and returns immediately without waiting 6139 * for the mailbox completion. The no_wait is supported only when HBA 6140 * is in SLI2/SLI3 mode - interrupts are enabled. 6141 * The SLI interface allows only one mailbox pending at a time. If the 6142 * mailbox is issued in polling mode and there is already a mailbox 6143 * pending, then the function will return an error. If the mailbox is issued 6144 * in NO_WAIT mode and there is a mailbox pending already, the function 6145 * will return MBX_BUSY after queuing the mailbox into mailbox queue. 6146 * The sli layer owns the mailbox object until the completion of mailbox 6147 * command if this function return MBX_BUSY or MBX_SUCCESS. For all other 6148 * return codes the caller owns the mailbox command after the return of 6149 * the function. 6150 **/ 6151 static int 6152 lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, 6153 uint32_t flag) 6154 { 6155 MAILBOX_t *mb; 6156 struct lpfc_sli *psli = &phba->sli; 6157 uint32_t status, evtctr; 6158 uint32_t ha_copy, hc_copy; 6159 int i; 6160 unsigned long timeout; 6161 unsigned long drvr_flag = 0; 6162 uint32_t word0, ldata; 6163 void __iomem *to_slim; 6164 int processing_queue = 0; 6165 6166 spin_lock_irqsave(&phba->hbalock, drvr_flag); 6167 if (!pmbox) { 6168 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 6169 /* processing mbox queue from intr_handler */ 6170 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { 6171 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6172 return MBX_SUCCESS; 6173 } 6174 processing_queue = 1; 6175 pmbox = lpfc_mbox_get(phba); 6176 if (!pmbox) { 6177 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6178 return MBX_SUCCESS; 6179 } 6180 } 6181 6182 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl && 6183 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) { 6184 if(!pmbox->vport) { 6185 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6186 lpfc_printf_log(phba, KERN_ERR, 6187 LOG_MBOX | LOG_VPORT, 6188 "1806 Mbox x%x failed. No vport\n", 6189 pmbox->u.mb.mbxCommand); 6190 dump_stack(); 6191 goto out_not_finished; 6192 } 6193 } 6194 6195 /* If the PCI channel is in offline state, do not post mbox. */ 6196 if (unlikely(pci_channel_offline(phba->pcidev))) { 6197 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6198 goto out_not_finished; 6199 } 6200 6201 /* If HBA has a deferred error attention, fail the iocb. */ 6202 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 6203 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6204 goto out_not_finished; 6205 } 6206 6207 psli = &phba->sli; 6208 6209 mb = &pmbox->u.mb; 6210 status = MBX_SUCCESS; 6211 6212 if (phba->link_state == LPFC_HBA_ERROR) { 6213 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6214 6215 /* Mbox command <mbxCommand> cannot issue */ 6216 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6217 "(%d):0311 Mailbox command x%x cannot " 6218 "issue Data: x%x x%x\n", 6219 pmbox->vport ? pmbox->vport->vpi : 0, 6220 pmbox->u.mb.mbxCommand, psli->sli_flag, flag); 6221 goto out_not_finished; 6222 } 6223 6224 if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) { 6225 if (lpfc_readl(phba->HCregaddr, &hc_copy) || 6226 !(hc_copy & HC_MBINT_ENA)) { 6227 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6228 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6229 "(%d):2528 Mailbox command x%x cannot " 6230 "issue Data: x%x x%x\n", 6231 pmbox->vport ? pmbox->vport->vpi : 0, 6232 pmbox->u.mb.mbxCommand, psli->sli_flag, flag); 6233 goto out_not_finished; 6234 } 6235 } 6236 6237 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 6238 /* Polling for a mbox command when another one is already active 6239 * is not allowed in SLI. Also, the driver must have established 6240 * SLI2 mode to queue and process multiple mbox commands. 6241 */ 6242 6243 if (flag & MBX_POLL) { 6244 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6245 6246 /* Mbox command <mbxCommand> cannot issue */ 6247 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6248 "(%d):2529 Mailbox command x%x " 6249 "cannot issue Data: x%x x%x\n", 6250 pmbox->vport ? pmbox->vport->vpi : 0, 6251 pmbox->u.mb.mbxCommand, 6252 psli->sli_flag, flag); 6253 goto out_not_finished; 6254 } 6255 6256 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) { 6257 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6258 /* Mbox command <mbxCommand> cannot issue */ 6259 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6260 "(%d):2530 Mailbox command x%x " 6261 "cannot issue Data: x%x x%x\n", 6262 pmbox->vport ? pmbox->vport->vpi : 0, 6263 pmbox->u.mb.mbxCommand, 6264 psli->sli_flag, flag); 6265 goto out_not_finished; 6266 } 6267 6268 /* Another mailbox command is still being processed, queue this 6269 * command to be processed later. 6270 */ 6271 lpfc_mbox_put(phba, pmbox); 6272 6273 /* Mbox cmd issue - BUSY */ 6274 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 6275 "(%d):0308 Mbox cmd issue - BUSY Data: " 6276 "x%x x%x x%x x%x\n", 6277 pmbox->vport ? pmbox->vport->vpi : 0xffffff, 6278 mb->mbxCommand, phba->pport->port_state, 6279 psli->sli_flag, flag); 6280 6281 psli->slistat.mbox_busy++; 6282 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6283 6284 if (pmbox->vport) { 6285 lpfc_debugfs_disc_trc(pmbox->vport, 6286 LPFC_DISC_TRC_MBOX_VPORT, 6287 "MBOX Bsy vport: cmd:x%x mb:x%x x%x", 6288 (uint32_t)mb->mbxCommand, 6289 mb->un.varWords[0], mb->un.varWords[1]); 6290 } 6291 else { 6292 lpfc_debugfs_disc_trc(phba->pport, 6293 LPFC_DISC_TRC_MBOX, 6294 "MBOX Bsy: cmd:x%x mb:x%x x%x", 6295 (uint32_t)mb->mbxCommand, 6296 mb->un.varWords[0], mb->un.varWords[1]); 6297 } 6298 6299 return MBX_BUSY; 6300 } 6301 6302 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 6303 6304 /* If we are not polling, we MUST be in SLI2 mode */ 6305 if (flag != MBX_POLL) { 6306 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) && 6307 (mb->mbxCommand != MBX_KILL_BOARD)) { 6308 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 6309 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6310 /* Mbox command <mbxCommand> cannot issue */ 6311 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6312 "(%d):2531 Mailbox command x%x " 6313 "cannot issue Data: x%x x%x\n", 6314 pmbox->vport ? pmbox->vport->vpi : 0, 6315 pmbox->u.mb.mbxCommand, 6316 psli->sli_flag, flag); 6317 goto out_not_finished; 6318 } 6319 /* timeout active mbox command */ 6320 mod_timer(&psli->mbox_tmo, (jiffies + 6321 (HZ * lpfc_mbox_tmo_val(phba, mb->mbxCommand)))); 6322 } 6323 6324 /* Mailbox cmd <cmd> issue */ 6325 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 6326 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x " 6327 "x%x\n", 6328 pmbox->vport ? pmbox->vport->vpi : 0, 6329 mb->mbxCommand, phba->pport->port_state, 6330 psli->sli_flag, flag); 6331 6332 if (mb->mbxCommand != MBX_HEARTBEAT) { 6333 if (pmbox->vport) { 6334 lpfc_debugfs_disc_trc(pmbox->vport, 6335 LPFC_DISC_TRC_MBOX_VPORT, 6336 "MBOX Send vport: cmd:x%x mb:x%x x%x", 6337 (uint32_t)mb->mbxCommand, 6338 mb->un.varWords[0], mb->un.varWords[1]); 6339 } 6340 else { 6341 lpfc_debugfs_disc_trc(phba->pport, 6342 LPFC_DISC_TRC_MBOX, 6343 "MBOX Send: cmd:x%x mb:x%x x%x", 6344 (uint32_t)mb->mbxCommand, 6345 mb->un.varWords[0], mb->un.varWords[1]); 6346 } 6347 } 6348 6349 psli->slistat.mbox_cmd++; 6350 evtctr = psli->slistat.mbox_event; 6351 6352 /* next set own bit for the adapter and copy over command word */ 6353 mb->mbxOwner = OWN_CHIP; 6354 6355 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 6356 /* Populate mbox extension offset word. */ 6357 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) { 6358 *(((uint32_t *)mb) + pmbox->mbox_offset_word) 6359 = (uint8_t *)phba->mbox_ext 6360 - (uint8_t *)phba->mbox; 6361 } 6362 6363 /* Copy the mailbox extension data */ 6364 if (pmbox->in_ext_byte_len && pmbox->context2) { 6365 lpfc_sli_pcimem_bcopy(pmbox->context2, 6366 (uint8_t *)phba->mbox_ext, 6367 pmbox->in_ext_byte_len); 6368 } 6369 /* Copy command data to host SLIM area */ 6370 lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE); 6371 } else { 6372 /* Populate mbox extension offset word. */ 6373 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) 6374 *(((uint32_t *)mb) + pmbox->mbox_offset_word) 6375 = MAILBOX_HBA_EXT_OFFSET; 6376 6377 /* Copy the mailbox extension data */ 6378 if (pmbox->in_ext_byte_len && pmbox->context2) { 6379 lpfc_memcpy_to_slim(phba->MBslimaddr + 6380 MAILBOX_HBA_EXT_OFFSET, 6381 pmbox->context2, pmbox->in_ext_byte_len); 6382 6383 } 6384 if (mb->mbxCommand == MBX_CONFIG_PORT) { 6385 /* copy command data into host mbox for cmpl */ 6386 lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE); 6387 } 6388 6389 /* First copy mbox command data to HBA SLIM, skip past first 6390 word */ 6391 to_slim = phba->MBslimaddr + sizeof (uint32_t); 6392 lpfc_memcpy_to_slim(to_slim, &mb->un.varWords[0], 6393 MAILBOX_CMD_SIZE - sizeof (uint32_t)); 6394 6395 /* Next copy over first word, with mbxOwner set */ 6396 ldata = *((uint32_t *)mb); 6397 to_slim = phba->MBslimaddr; 6398 writel(ldata, to_slim); 6399 readl(to_slim); /* flush */ 6400 6401 if (mb->mbxCommand == MBX_CONFIG_PORT) { 6402 /* switch over to host mailbox */ 6403 psli->sli_flag |= LPFC_SLI_ACTIVE; 6404 } 6405 } 6406 6407 wmb(); 6408 6409 switch (flag) { 6410 case MBX_NOWAIT: 6411 /* Set up reference to mailbox command */ 6412 psli->mbox_active = pmbox; 6413 /* Interrupt board to do it */ 6414 writel(CA_MBATT, phba->CAregaddr); 6415 readl(phba->CAregaddr); /* flush */ 6416 /* Don't wait for it to finish, just return */ 6417 break; 6418 6419 case MBX_POLL: 6420 /* Set up null reference to mailbox command */ 6421 psli->mbox_active = NULL; 6422 /* Interrupt board to do it */ 6423 writel(CA_MBATT, phba->CAregaddr); 6424 readl(phba->CAregaddr); /* flush */ 6425 6426 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 6427 /* First read mbox status word */ 6428 word0 = *((uint32_t *)phba->mbox); 6429 word0 = le32_to_cpu(word0); 6430 } else { 6431 /* First read mbox status word */ 6432 if (lpfc_readl(phba->MBslimaddr, &word0)) { 6433 spin_unlock_irqrestore(&phba->hbalock, 6434 drvr_flag); 6435 goto out_not_finished; 6436 } 6437 } 6438 6439 /* Read the HBA Host Attention Register */ 6440 if (lpfc_readl(phba->HAregaddr, &ha_copy)) { 6441 spin_unlock_irqrestore(&phba->hbalock, 6442 drvr_flag); 6443 goto out_not_finished; 6444 } 6445 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, 6446 mb->mbxCommand) * 6447 1000) + jiffies; 6448 i = 0; 6449 /* Wait for command to complete */ 6450 while (((word0 & OWN_CHIP) == OWN_CHIP) || 6451 (!(ha_copy & HA_MBATT) && 6452 (phba->link_state > LPFC_WARM_START))) { 6453 if (time_after(jiffies, timeout)) { 6454 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 6455 spin_unlock_irqrestore(&phba->hbalock, 6456 drvr_flag); 6457 goto out_not_finished; 6458 } 6459 6460 /* Check if we took a mbox interrupt while we were 6461 polling */ 6462 if (((word0 & OWN_CHIP) != OWN_CHIP) 6463 && (evtctr != psli->slistat.mbox_event)) 6464 break; 6465 6466 if (i++ > 10) { 6467 spin_unlock_irqrestore(&phba->hbalock, 6468 drvr_flag); 6469 msleep(1); 6470 spin_lock_irqsave(&phba->hbalock, drvr_flag); 6471 } 6472 6473 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 6474 /* First copy command data */ 6475 word0 = *((uint32_t *)phba->mbox); 6476 word0 = le32_to_cpu(word0); 6477 if (mb->mbxCommand == MBX_CONFIG_PORT) { 6478 MAILBOX_t *slimmb; 6479 uint32_t slimword0; 6480 /* Check real SLIM for any errors */ 6481 slimword0 = readl(phba->MBslimaddr); 6482 slimmb = (MAILBOX_t *) & slimword0; 6483 if (((slimword0 & OWN_CHIP) != OWN_CHIP) 6484 && slimmb->mbxStatus) { 6485 psli->sli_flag &= 6486 ~LPFC_SLI_ACTIVE; 6487 word0 = slimword0; 6488 } 6489 } 6490 } else { 6491 /* First copy command data */ 6492 word0 = readl(phba->MBslimaddr); 6493 } 6494 /* Read the HBA Host Attention Register */ 6495 if (lpfc_readl(phba->HAregaddr, &ha_copy)) { 6496 spin_unlock_irqrestore(&phba->hbalock, 6497 drvr_flag); 6498 goto out_not_finished; 6499 } 6500 } 6501 6502 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 6503 /* copy results back to user */ 6504 lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE); 6505 /* Copy the mailbox extension data */ 6506 if (pmbox->out_ext_byte_len && pmbox->context2) { 6507 lpfc_sli_pcimem_bcopy(phba->mbox_ext, 6508 pmbox->context2, 6509 pmbox->out_ext_byte_len); 6510 } 6511 } else { 6512 /* First copy command data */ 6513 lpfc_memcpy_from_slim(mb, phba->MBslimaddr, 6514 MAILBOX_CMD_SIZE); 6515 /* Copy the mailbox extension data */ 6516 if (pmbox->out_ext_byte_len && pmbox->context2) { 6517 lpfc_memcpy_from_slim(pmbox->context2, 6518 phba->MBslimaddr + 6519 MAILBOX_HBA_EXT_OFFSET, 6520 pmbox->out_ext_byte_len); 6521 } 6522 } 6523 6524 writel(HA_MBATT, phba->HAregaddr); 6525 readl(phba->HAregaddr); /* flush */ 6526 6527 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 6528 status = mb->mbxStatus; 6529 } 6530 6531 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6532 return status; 6533 6534 out_not_finished: 6535 if (processing_queue) { 6536 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED; 6537 lpfc_mbox_cmpl_put(phba, pmbox); 6538 } 6539 return MBX_NOT_FINISHED; 6540 } 6541 6542 /** 6543 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command 6544 * @phba: Pointer to HBA context object. 6545 * 6546 * The function blocks the posting of SLI4 asynchronous mailbox commands from 6547 * the driver internal pending mailbox queue. It will then try to wait out the 6548 * possible outstanding mailbox command before return. 6549 * 6550 * Returns: 6551 * 0 - the outstanding mailbox command completed; otherwise, the wait for 6552 * the outstanding mailbox command timed out. 6553 **/ 6554 static int 6555 lpfc_sli4_async_mbox_block(struct lpfc_hba *phba) 6556 { 6557 struct lpfc_sli *psli = &phba->sli; 6558 uint8_t actcmd = MBX_HEARTBEAT; 6559 int rc = 0; 6560 unsigned long timeout; 6561 6562 /* Mark the asynchronous mailbox command posting as blocked */ 6563 spin_lock_irq(&phba->hbalock); 6564 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 6565 if (phba->sli.mbox_active) 6566 actcmd = phba->sli.mbox_active->u.mb.mbxCommand; 6567 spin_unlock_irq(&phba->hbalock); 6568 /* Determine how long we might wait for the active mailbox 6569 * command to be gracefully completed by firmware. 6570 */ 6571 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) * 1000) + 6572 jiffies; 6573 /* Wait for the outstnading mailbox command to complete */ 6574 while (phba->sli.mbox_active) { 6575 /* Check active mailbox complete status every 2ms */ 6576 msleep(2); 6577 if (time_after(jiffies, timeout)) { 6578 /* Timeout, marked the outstanding cmd not complete */ 6579 rc = 1; 6580 break; 6581 } 6582 } 6583 6584 /* Can not cleanly block async mailbox command, fails it */ 6585 if (rc) { 6586 spin_lock_irq(&phba->hbalock); 6587 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 6588 spin_unlock_irq(&phba->hbalock); 6589 } 6590 return rc; 6591 } 6592 6593 /** 6594 * lpfc_sli4_async_mbox_unblock - Block posting SLI4 async mailbox command 6595 * @phba: Pointer to HBA context object. 6596 * 6597 * The function unblocks and resume posting of SLI4 asynchronous mailbox 6598 * commands from the driver internal pending mailbox queue. It makes sure 6599 * that there is no outstanding mailbox command before resuming posting 6600 * asynchronous mailbox commands. If, for any reason, there is outstanding 6601 * mailbox command, it will try to wait it out before resuming asynchronous 6602 * mailbox command posting. 6603 **/ 6604 static void 6605 lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba) 6606 { 6607 struct lpfc_sli *psli = &phba->sli; 6608 6609 spin_lock_irq(&phba->hbalock); 6610 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { 6611 /* Asynchronous mailbox posting is not blocked, do nothing */ 6612 spin_unlock_irq(&phba->hbalock); 6613 return; 6614 } 6615 6616 /* Outstanding synchronous mailbox command is guaranteed to be done, 6617 * successful or timeout, after timing-out the outstanding mailbox 6618 * command shall always be removed, so just unblock posting async 6619 * mailbox command and resume 6620 */ 6621 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 6622 spin_unlock_irq(&phba->hbalock); 6623 6624 /* wake up worker thread to post asynchronlous mailbox command */ 6625 lpfc_worker_wake_up(phba); 6626 } 6627 6628 /** 6629 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox 6630 * @phba: Pointer to HBA context object. 6631 * @mboxq: Pointer to mailbox object. 6632 * 6633 * The function posts a mailbox to the port. The mailbox is expected 6634 * to be comletely filled in and ready for the port to operate on it. 6635 * This routine executes a synchronous completion operation on the 6636 * mailbox by polling for its completion. 6637 * 6638 * The caller must not be holding any locks when calling this routine. 6639 * 6640 * Returns: 6641 * MBX_SUCCESS - mailbox posted successfully 6642 * Any of the MBX error values. 6643 **/ 6644 static int 6645 lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 6646 { 6647 int rc = MBX_SUCCESS; 6648 unsigned long iflag; 6649 uint32_t db_ready; 6650 uint32_t mcqe_status; 6651 uint32_t mbx_cmnd; 6652 unsigned long timeout; 6653 struct lpfc_sli *psli = &phba->sli; 6654 struct lpfc_mqe *mb = &mboxq->u.mqe; 6655 struct lpfc_bmbx_create *mbox_rgn; 6656 struct dma_address *dma_address; 6657 struct lpfc_register bmbx_reg; 6658 6659 /* 6660 * Only one mailbox can be active to the bootstrap mailbox region 6661 * at a time and there is no queueing provided. 6662 */ 6663 spin_lock_irqsave(&phba->hbalock, iflag); 6664 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 6665 spin_unlock_irqrestore(&phba->hbalock, iflag); 6666 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6667 "(%d):2532 Mailbox command x%x (x%x) " 6668 "cannot issue Data: x%x x%x\n", 6669 mboxq->vport ? mboxq->vport->vpi : 0, 6670 mboxq->u.mb.mbxCommand, 6671 lpfc_sli4_mbox_opcode_get(phba, mboxq), 6672 psli->sli_flag, MBX_POLL); 6673 return MBXERR_ERROR; 6674 } 6675 /* The server grabs the token and owns it until release */ 6676 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 6677 phba->sli.mbox_active = mboxq; 6678 spin_unlock_irqrestore(&phba->hbalock, iflag); 6679 6680 /* 6681 * Initialize the bootstrap memory region to avoid stale data areas 6682 * in the mailbox post. Then copy the caller's mailbox contents to 6683 * the bmbx mailbox region. 6684 */ 6685 mbx_cmnd = bf_get(lpfc_mqe_command, mb); 6686 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create)); 6687 lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt, 6688 sizeof(struct lpfc_mqe)); 6689 6690 /* Post the high mailbox dma address to the port and wait for ready. */ 6691 dma_address = &phba->sli4_hba.bmbx.dma_address; 6692 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr); 6693 6694 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd) 6695 * 1000) + jiffies; 6696 do { 6697 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr); 6698 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg); 6699 if (!db_ready) 6700 msleep(2); 6701 6702 if (time_after(jiffies, timeout)) { 6703 rc = MBXERR_ERROR; 6704 goto exit; 6705 } 6706 } while (!db_ready); 6707 6708 /* Post the low mailbox dma address to the port. */ 6709 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr); 6710 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd) 6711 * 1000) + jiffies; 6712 do { 6713 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr); 6714 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg); 6715 if (!db_ready) 6716 msleep(2); 6717 6718 if (time_after(jiffies, timeout)) { 6719 rc = MBXERR_ERROR; 6720 goto exit; 6721 } 6722 } while (!db_ready); 6723 6724 /* 6725 * Read the CQ to ensure the mailbox has completed. 6726 * If so, update the mailbox status so that the upper layers 6727 * can complete the request normally. 6728 */ 6729 lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb, 6730 sizeof(struct lpfc_mqe)); 6731 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt; 6732 lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe, 6733 sizeof(struct lpfc_mcqe)); 6734 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe); 6735 /* 6736 * When the CQE status indicates a failure and the mailbox status 6737 * indicates success then copy the CQE status into the mailbox status 6738 * (and prefix it with x4000). 6739 */ 6740 if (mcqe_status != MB_CQE_STATUS_SUCCESS) { 6741 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS) 6742 bf_set(lpfc_mqe_status, mb, 6743 (LPFC_MBX_ERROR_RANGE | mcqe_status)); 6744 rc = MBXERR_ERROR; 6745 } else 6746 lpfc_sli4_swap_str(phba, mboxq); 6747 6748 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 6749 "(%d):0356 Mailbox cmd x%x (x%x) Status x%x " 6750 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x" 6751 " x%x x%x CQ: x%x x%x x%x x%x\n", 6752 mboxq->vport ? mboxq->vport->vpi : 0, 6753 mbx_cmnd, lpfc_sli4_mbox_opcode_get(phba, mboxq), 6754 bf_get(lpfc_mqe_status, mb), 6755 mb->un.mb_words[0], mb->un.mb_words[1], 6756 mb->un.mb_words[2], mb->un.mb_words[3], 6757 mb->un.mb_words[4], mb->un.mb_words[5], 6758 mb->un.mb_words[6], mb->un.mb_words[7], 6759 mb->un.mb_words[8], mb->un.mb_words[9], 6760 mb->un.mb_words[10], mb->un.mb_words[11], 6761 mb->un.mb_words[12], mboxq->mcqe.word0, 6762 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, 6763 mboxq->mcqe.trailer); 6764 exit: 6765 /* We are holding the token, no needed for lock when release */ 6766 spin_lock_irqsave(&phba->hbalock, iflag); 6767 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 6768 phba->sli.mbox_active = NULL; 6769 spin_unlock_irqrestore(&phba->hbalock, iflag); 6770 return rc; 6771 } 6772 6773 /** 6774 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware 6775 * @phba: Pointer to HBA context object. 6776 * @pmbox: Pointer to mailbox object. 6777 * @flag: Flag indicating how the mailbox need to be processed. 6778 * 6779 * This function is called by discovery code and HBA management code to submit 6780 * a mailbox command to firmware with SLI-4 interface spec. 6781 * 6782 * Return codes the caller owns the mailbox command after the return of the 6783 * function. 6784 **/ 6785 static int 6786 lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, 6787 uint32_t flag) 6788 { 6789 struct lpfc_sli *psli = &phba->sli; 6790 unsigned long iflags; 6791 int rc; 6792 6793 /* dump from issue mailbox command if setup */ 6794 lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb); 6795 6796 rc = lpfc_mbox_dev_check(phba); 6797 if (unlikely(rc)) { 6798 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6799 "(%d):2544 Mailbox command x%x (x%x) " 6800 "cannot issue Data: x%x x%x\n", 6801 mboxq->vport ? mboxq->vport->vpi : 0, 6802 mboxq->u.mb.mbxCommand, 6803 lpfc_sli4_mbox_opcode_get(phba, mboxq), 6804 psli->sli_flag, flag); 6805 goto out_not_finished; 6806 } 6807 6808 /* Detect polling mode and jump to a handler */ 6809 if (!phba->sli4_hba.intr_enable) { 6810 if (flag == MBX_POLL) 6811 rc = lpfc_sli4_post_sync_mbox(phba, mboxq); 6812 else 6813 rc = -EIO; 6814 if (rc != MBX_SUCCESS) 6815 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 6816 "(%d):2541 Mailbox command x%x " 6817 "(x%x) cannot issue Data: x%x x%x\n", 6818 mboxq->vport ? mboxq->vport->vpi : 0, 6819 mboxq->u.mb.mbxCommand, 6820 lpfc_sli4_mbox_opcode_get(phba, mboxq), 6821 psli->sli_flag, flag); 6822 return rc; 6823 } else if (flag == MBX_POLL) { 6824 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 6825 "(%d):2542 Try to issue mailbox command " 6826 "x%x (x%x) synchronously ahead of async" 6827 "mailbox command queue: x%x x%x\n", 6828 mboxq->vport ? mboxq->vport->vpi : 0, 6829 mboxq->u.mb.mbxCommand, 6830 lpfc_sli4_mbox_opcode_get(phba, mboxq), 6831 psli->sli_flag, flag); 6832 /* Try to block the asynchronous mailbox posting */ 6833 rc = lpfc_sli4_async_mbox_block(phba); 6834 if (!rc) { 6835 /* Successfully blocked, now issue sync mbox cmd */ 6836 rc = lpfc_sli4_post_sync_mbox(phba, mboxq); 6837 if (rc != MBX_SUCCESS) 6838 lpfc_printf_log(phba, KERN_ERR, 6839 LOG_MBOX | LOG_SLI, 6840 "(%d):2597 Mailbox command " 6841 "x%x (x%x) cannot issue " 6842 "Data: x%x x%x\n", 6843 mboxq->vport ? 6844 mboxq->vport->vpi : 0, 6845 mboxq->u.mb.mbxCommand, 6846 lpfc_sli4_mbox_opcode_get(phba, 6847 mboxq), 6848 psli->sli_flag, flag); 6849 /* Unblock the async mailbox posting afterward */ 6850 lpfc_sli4_async_mbox_unblock(phba); 6851 } 6852 return rc; 6853 } 6854 6855 /* Now, interrupt mode asynchrous mailbox command */ 6856 rc = lpfc_mbox_cmd_check(phba, mboxq); 6857 if (rc) { 6858 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6859 "(%d):2543 Mailbox command x%x (x%x) " 6860 "cannot issue Data: x%x x%x\n", 6861 mboxq->vport ? mboxq->vport->vpi : 0, 6862 mboxq->u.mb.mbxCommand, 6863 lpfc_sli4_mbox_opcode_get(phba, mboxq), 6864 psli->sli_flag, flag); 6865 goto out_not_finished; 6866 } 6867 6868 /* Put the mailbox command to the driver internal FIFO */ 6869 psli->slistat.mbox_busy++; 6870 spin_lock_irqsave(&phba->hbalock, iflags); 6871 lpfc_mbox_put(phba, mboxq); 6872 spin_unlock_irqrestore(&phba->hbalock, iflags); 6873 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 6874 "(%d):0354 Mbox cmd issue - Enqueue Data: " 6875 "x%x (x%x) x%x x%x x%x\n", 6876 mboxq->vport ? mboxq->vport->vpi : 0xffffff, 6877 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 6878 lpfc_sli4_mbox_opcode_get(phba, mboxq), 6879 phba->pport->port_state, 6880 psli->sli_flag, MBX_NOWAIT); 6881 /* Wake up worker thread to transport mailbox command from head */ 6882 lpfc_worker_wake_up(phba); 6883 6884 return MBX_BUSY; 6885 6886 out_not_finished: 6887 return MBX_NOT_FINISHED; 6888 } 6889 6890 /** 6891 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device 6892 * @phba: Pointer to HBA context object. 6893 * 6894 * This function is called by worker thread to send a mailbox command to 6895 * SLI4 HBA firmware. 6896 * 6897 **/ 6898 int 6899 lpfc_sli4_post_async_mbox(struct lpfc_hba *phba) 6900 { 6901 struct lpfc_sli *psli = &phba->sli; 6902 LPFC_MBOXQ_t *mboxq; 6903 int rc = MBX_SUCCESS; 6904 unsigned long iflags; 6905 struct lpfc_mqe *mqe; 6906 uint32_t mbx_cmnd; 6907 6908 /* Check interrupt mode before post async mailbox command */ 6909 if (unlikely(!phba->sli4_hba.intr_enable)) 6910 return MBX_NOT_FINISHED; 6911 6912 /* Check for mailbox command service token */ 6913 spin_lock_irqsave(&phba->hbalock, iflags); 6914 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { 6915 spin_unlock_irqrestore(&phba->hbalock, iflags); 6916 return MBX_NOT_FINISHED; 6917 } 6918 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 6919 spin_unlock_irqrestore(&phba->hbalock, iflags); 6920 return MBX_NOT_FINISHED; 6921 } 6922 if (unlikely(phba->sli.mbox_active)) { 6923 spin_unlock_irqrestore(&phba->hbalock, iflags); 6924 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6925 "0384 There is pending active mailbox cmd\n"); 6926 return MBX_NOT_FINISHED; 6927 } 6928 /* Take the mailbox command service token */ 6929 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 6930 6931 /* Get the next mailbox command from head of queue */ 6932 mboxq = lpfc_mbox_get(phba); 6933 6934 /* If no more mailbox command waiting for post, we're done */ 6935 if (!mboxq) { 6936 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 6937 spin_unlock_irqrestore(&phba->hbalock, iflags); 6938 return MBX_SUCCESS; 6939 } 6940 phba->sli.mbox_active = mboxq; 6941 spin_unlock_irqrestore(&phba->hbalock, iflags); 6942 6943 /* Check device readiness for posting mailbox command */ 6944 rc = lpfc_mbox_dev_check(phba); 6945 if (unlikely(rc)) 6946 /* Driver clean routine will clean up pending mailbox */ 6947 goto out_not_finished; 6948 6949 /* Prepare the mbox command to be posted */ 6950 mqe = &mboxq->u.mqe; 6951 mbx_cmnd = bf_get(lpfc_mqe_command, mqe); 6952 6953 /* Start timer for the mbox_tmo and log some mailbox post messages */ 6954 mod_timer(&psli->mbox_tmo, (jiffies + 6955 (HZ * lpfc_mbox_tmo_val(phba, mbx_cmnd)))); 6956 6957 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 6958 "(%d):0355 Mailbox cmd x%x (x%x) issue Data: " 6959 "x%x x%x\n", 6960 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd, 6961 lpfc_sli4_mbox_opcode_get(phba, mboxq), 6962 phba->pport->port_state, psli->sli_flag); 6963 6964 if (mbx_cmnd != MBX_HEARTBEAT) { 6965 if (mboxq->vport) { 6966 lpfc_debugfs_disc_trc(mboxq->vport, 6967 LPFC_DISC_TRC_MBOX_VPORT, 6968 "MBOX Send vport: cmd:x%x mb:x%x x%x", 6969 mbx_cmnd, mqe->un.mb_words[0], 6970 mqe->un.mb_words[1]); 6971 } else { 6972 lpfc_debugfs_disc_trc(phba->pport, 6973 LPFC_DISC_TRC_MBOX, 6974 "MBOX Send: cmd:x%x mb:x%x x%x", 6975 mbx_cmnd, mqe->un.mb_words[0], 6976 mqe->un.mb_words[1]); 6977 } 6978 } 6979 psli->slistat.mbox_cmd++; 6980 6981 /* Post the mailbox command to the port */ 6982 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe); 6983 if (rc != MBX_SUCCESS) { 6984 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6985 "(%d):2533 Mailbox command x%x (x%x) " 6986 "cannot issue Data: x%x x%x\n", 6987 mboxq->vport ? mboxq->vport->vpi : 0, 6988 mboxq->u.mb.mbxCommand, 6989 lpfc_sli4_mbox_opcode_get(phba, mboxq), 6990 psli->sli_flag, MBX_NOWAIT); 6991 goto out_not_finished; 6992 } 6993 6994 return rc; 6995 6996 out_not_finished: 6997 spin_lock_irqsave(&phba->hbalock, iflags); 6998 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; 6999 __lpfc_mbox_cmpl_put(phba, mboxq); 7000 /* Release the token */ 7001 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 7002 phba->sli.mbox_active = NULL; 7003 spin_unlock_irqrestore(&phba->hbalock, iflags); 7004 7005 return MBX_NOT_FINISHED; 7006 } 7007 7008 /** 7009 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command 7010 * @phba: Pointer to HBA context object. 7011 * @pmbox: Pointer to mailbox object. 7012 * @flag: Flag indicating how the mailbox need to be processed. 7013 * 7014 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from 7015 * the API jump table function pointer from the lpfc_hba struct. 7016 * 7017 * Return codes the caller owns the mailbox command after the return of the 7018 * function. 7019 **/ 7020 int 7021 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) 7022 { 7023 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag); 7024 } 7025 7026 /** 7027 * lpfc_mbox_api_table_setup - Set up mbox api function jump table 7028 * @phba: The hba struct for which this call is being executed. 7029 * @dev_grp: The HBA PCI-Device group number. 7030 * 7031 * This routine sets up the mbox interface API function jump table in @phba 7032 * struct. 7033 * Returns: 0 - success, -ENODEV - failure. 7034 **/ 7035 int 7036 lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 7037 { 7038 7039 switch (dev_grp) { 7040 case LPFC_PCI_DEV_LP: 7041 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3; 7042 phba->lpfc_sli_handle_slow_ring_event = 7043 lpfc_sli_handle_slow_ring_event_s3; 7044 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3; 7045 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3; 7046 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3; 7047 break; 7048 case LPFC_PCI_DEV_OC: 7049 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4; 7050 phba->lpfc_sli_handle_slow_ring_event = 7051 lpfc_sli_handle_slow_ring_event_s4; 7052 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4; 7053 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4; 7054 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4; 7055 break; 7056 default: 7057 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7058 "1420 Invalid HBA PCI-device group: 0x%x\n", 7059 dev_grp); 7060 return -ENODEV; 7061 break; 7062 } 7063 return 0; 7064 } 7065 7066 /** 7067 * __lpfc_sli_ringtx_put - Add an iocb to the txq 7068 * @phba: Pointer to HBA context object. 7069 * @pring: Pointer to driver SLI ring object. 7070 * @piocb: Pointer to address of newly added command iocb. 7071 * 7072 * This function is called with hbalock held to add a command 7073 * iocb to the txq when SLI layer cannot submit the command iocb 7074 * to the ring. 7075 **/ 7076 void 7077 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 7078 struct lpfc_iocbq *piocb) 7079 { 7080 /* Insert the caller's iocb in the txq tail for later processing. */ 7081 list_add_tail(&piocb->list, &pring->txq); 7082 pring->txq_cnt++; 7083 } 7084 7085 /** 7086 * lpfc_sli_next_iocb - Get the next iocb in the txq 7087 * @phba: Pointer to HBA context object. 7088 * @pring: Pointer to driver SLI ring object. 7089 * @piocb: Pointer to address of newly added command iocb. 7090 * 7091 * This function is called with hbalock held before a new 7092 * iocb is submitted to the firmware. This function checks 7093 * txq to flush the iocbs in txq to Firmware before 7094 * submitting new iocbs to the Firmware. 7095 * If there are iocbs in the txq which need to be submitted 7096 * to firmware, lpfc_sli_next_iocb returns the first element 7097 * of the txq after dequeuing it from txq. 7098 * If there is no iocb in the txq then the function will return 7099 * *piocb and *piocb is set to NULL. Caller needs to check 7100 * *piocb to find if there are more commands in the txq. 7101 **/ 7102 static struct lpfc_iocbq * 7103 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 7104 struct lpfc_iocbq **piocb) 7105 { 7106 struct lpfc_iocbq * nextiocb; 7107 7108 nextiocb = lpfc_sli_ringtx_get(phba, pring); 7109 if (!nextiocb) { 7110 nextiocb = *piocb; 7111 *piocb = NULL; 7112 } 7113 7114 return nextiocb; 7115 } 7116 7117 /** 7118 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb 7119 * @phba: Pointer to HBA context object. 7120 * @ring_number: SLI ring number to issue iocb on. 7121 * @piocb: Pointer to command iocb. 7122 * @flag: Flag indicating if this command can be put into txq. 7123 * 7124 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue 7125 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is 7126 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT 7127 * flag is turned on, the function returns IOCB_ERROR. When the link is down, 7128 * this function allows only iocbs for posting buffers. This function finds 7129 * next available slot in the command ring and posts the command to the 7130 * available slot and writes the port attention register to request HBA start 7131 * processing new iocb. If there is no slot available in the ring and 7132 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise 7133 * the function returns IOCB_BUSY. 7134 * 7135 * This function is called with hbalock held. The function will return success 7136 * after it successfully submit the iocb to firmware or after adding to the 7137 * txq. 7138 **/ 7139 static int 7140 __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number, 7141 struct lpfc_iocbq *piocb, uint32_t flag) 7142 { 7143 struct lpfc_iocbq *nextiocb; 7144 IOCB_t *iocb; 7145 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number]; 7146 7147 if (piocb->iocb_cmpl && (!piocb->vport) && 7148 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && 7149 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) { 7150 lpfc_printf_log(phba, KERN_ERR, 7151 LOG_SLI | LOG_VPORT, 7152 "1807 IOCB x%x failed. No vport\n", 7153 piocb->iocb.ulpCommand); 7154 dump_stack(); 7155 return IOCB_ERROR; 7156 } 7157 7158 7159 /* If the PCI channel is in offline state, do not post iocbs. */ 7160 if (unlikely(pci_channel_offline(phba->pcidev))) 7161 return IOCB_ERROR; 7162 7163 /* If HBA has a deferred error attention, fail the iocb. */ 7164 if (unlikely(phba->hba_flag & DEFER_ERATT)) 7165 return IOCB_ERROR; 7166 7167 /* 7168 * We should never get an IOCB if we are in a < LINK_DOWN state 7169 */ 7170 if (unlikely(phba->link_state < LPFC_LINK_DOWN)) 7171 return IOCB_ERROR; 7172 7173 /* 7174 * Check to see if we are blocking IOCB processing because of a 7175 * outstanding event. 7176 */ 7177 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT)) 7178 goto iocb_busy; 7179 7180 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) { 7181 /* 7182 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF 7183 * can be issued if the link is not up. 7184 */ 7185 switch (piocb->iocb.ulpCommand) { 7186 case CMD_GEN_REQUEST64_CR: 7187 case CMD_GEN_REQUEST64_CX: 7188 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) || 7189 (piocb->iocb.un.genreq64.w5.hcsw.Rctl != 7190 FC_RCTL_DD_UNSOL_CMD) || 7191 (piocb->iocb.un.genreq64.w5.hcsw.Type != 7192 MENLO_TRANSPORT_TYPE)) 7193 7194 goto iocb_busy; 7195 break; 7196 case CMD_QUE_RING_BUF_CN: 7197 case CMD_QUE_RING_BUF64_CN: 7198 /* 7199 * For IOCBs, like QUE_RING_BUF, that have no rsp ring 7200 * completion, iocb_cmpl MUST be 0. 7201 */ 7202 if (piocb->iocb_cmpl) 7203 piocb->iocb_cmpl = NULL; 7204 /*FALLTHROUGH*/ 7205 case CMD_CREATE_XRI_CR: 7206 case CMD_CLOSE_XRI_CN: 7207 case CMD_CLOSE_XRI_CX: 7208 break; 7209 default: 7210 goto iocb_busy; 7211 } 7212 7213 /* 7214 * For FCP commands, we must be in a state where we can process link 7215 * attention events. 7216 */ 7217 } else if (unlikely(pring->ringno == phba->sli.fcp_ring && 7218 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) { 7219 goto iocb_busy; 7220 } 7221 7222 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && 7223 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb))) 7224 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb); 7225 7226 if (iocb) 7227 lpfc_sli_update_ring(phba, pring); 7228 else 7229 lpfc_sli_update_full_ring(phba, pring); 7230 7231 if (!piocb) 7232 return IOCB_SUCCESS; 7233 7234 goto out_busy; 7235 7236 iocb_busy: 7237 pring->stats.iocb_cmd_delay++; 7238 7239 out_busy: 7240 7241 if (!(flag & SLI_IOCB_RET_IOCB)) { 7242 __lpfc_sli_ringtx_put(phba, pring, piocb); 7243 return IOCB_SUCCESS; 7244 } 7245 7246 return IOCB_BUSY; 7247 } 7248 7249 /** 7250 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl. 7251 * @phba: Pointer to HBA context object. 7252 * @piocb: Pointer to command iocb. 7253 * @sglq: Pointer to the scatter gather queue object. 7254 * 7255 * This routine converts the bpl or bde that is in the IOCB 7256 * to a sgl list for the sli4 hardware. The physical address 7257 * of the bpl/bde is converted back to a virtual address. 7258 * If the IOCB contains a BPL then the list of BDE's is 7259 * converted to sli4_sge's. If the IOCB contains a single 7260 * BDE then it is converted to a single sli_sge. 7261 * The IOCB is still in cpu endianess so the contents of 7262 * the bpl can be used without byte swapping. 7263 * 7264 * Returns valid XRI = Success, NO_XRI = Failure. 7265 **/ 7266 static uint16_t 7267 lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, 7268 struct lpfc_sglq *sglq) 7269 { 7270 uint16_t xritag = NO_XRI; 7271 struct ulp_bde64 *bpl = NULL; 7272 struct ulp_bde64 bde; 7273 struct sli4_sge *sgl = NULL; 7274 IOCB_t *icmd; 7275 int numBdes = 0; 7276 int i = 0; 7277 uint32_t offset = 0; /* accumulated offset in the sg request list */ 7278 int inbound = 0; /* number of sg reply entries inbound from firmware */ 7279 7280 if (!piocbq || !sglq) 7281 return xritag; 7282 7283 sgl = (struct sli4_sge *)sglq->sgl; 7284 icmd = &piocbq->iocb; 7285 if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { 7286 numBdes = icmd->un.genreq64.bdl.bdeSize / 7287 sizeof(struct ulp_bde64); 7288 /* The addrHigh and addrLow fields within the IOCB 7289 * have not been byteswapped yet so there is no 7290 * need to swap them back. 7291 */ 7292 bpl = (struct ulp_bde64 *) 7293 ((struct lpfc_dmabuf *)piocbq->context3)->virt; 7294 7295 if (!bpl) 7296 return xritag; 7297 7298 for (i = 0; i < numBdes; i++) { 7299 /* Should already be byte swapped. */ 7300 sgl->addr_hi = bpl->addrHigh; 7301 sgl->addr_lo = bpl->addrLow; 7302 7303 sgl->word2 = le32_to_cpu(sgl->word2); 7304 if ((i+1) == numBdes) 7305 bf_set(lpfc_sli4_sge_last, sgl, 1); 7306 else 7307 bf_set(lpfc_sli4_sge_last, sgl, 0); 7308 /* swap the size field back to the cpu so we 7309 * can assign it to the sgl. 7310 */ 7311 bde.tus.w = le32_to_cpu(bpl->tus.w); 7312 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize); 7313 /* The offsets in the sgl need to be accumulated 7314 * separately for the request and reply lists. 7315 * The request is always first, the reply follows. 7316 */ 7317 if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) { 7318 /* add up the reply sg entries */ 7319 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I) 7320 inbound++; 7321 /* first inbound? reset the offset */ 7322 if (inbound == 1) 7323 offset = 0; 7324 bf_set(lpfc_sli4_sge_offset, sgl, offset); 7325 offset += bde.tus.f.bdeSize; 7326 } 7327 sgl->word2 = cpu_to_le32(sgl->word2); 7328 bpl++; 7329 sgl++; 7330 } 7331 } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) { 7332 /* The addrHigh and addrLow fields of the BDE have not 7333 * been byteswapped yet so they need to be swapped 7334 * before putting them in the sgl. 7335 */ 7336 sgl->addr_hi = 7337 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh); 7338 sgl->addr_lo = 7339 cpu_to_le32(icmd->un.genreq64.bdl.addrLow); 7340 sgl->word2 = le32_to_cpu(sgl->word2); 7341 bf_set(lpfc_sli4_sge_last, sgl, 1); 7342 sgl->word2 = cpu_to_le32(sgl->word2); 7343 sgl->sge_len = 7344 cpu_to_le32(icmd->un.genreq64.bdl.bdeSize); 7345 } 7346 return sglq->sli4_xritag; 7347 } 7348 7349 /** 7350 * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution 7351 * @phba: Pointer to HBA context object. 7352 * 7353 * This routine performs a roundrobin SCSI command to SLI4 FCP WQ index 7354 * distribution. This is called by __lpfc_sli_issue_iocb_s4() with the hbalock 7355 * held. 7356 * 7357 * Return: index into SLI4 fast-path FCP queue index. 7358 **/ 7359 static uint32_t 7360 lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba) 7361 { 7362 ++phba->fcp_qidx; 7363 if (phba->fcp_qidx >= phba->cfg_fcp_wq_count) 7364 phba->fcp_qidx = 0; 7365 7366 return phba->fcp_qidx; 7367 } 7368 7369 /** 7370 * lpfc_sli_iocb2wqe - Convert the IOCB to a work queue entry. 7371 * @phba: Pointer to HBA context object. 7372 * @piocb: Pointer to command iocb. 7373 * @wqe: Pointer to the work queue entry. 7374 * 7375 * This routine converts the iocb command to its Work Queue Entry 7376 * equivalent. The wqe pointer should not have any fields set when 7377 * this routine is called because it will memcpy over them. 7378 * This routine does not set the CQ_ID or the WQEC bits in the 7379 * wqe. 7380 * 7381 * Returns: 0 = Success, IOCB_ERROR = Failure. 7382 **/ 7383 static int 7384 lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, 7385 union lpfc_wqe *wqe) 7386 { 7387 uint32_t xmit_len = 0, total_len = 0; 7388 uint8_t ct = 0; 7389 uint32_t fip; 7390 uint32_t abort_tag; 7391 uint8_t command_type = ELS_COMMAND_NON_FIP; 7392 uint8_t cmnd; 7393 uint16_t xritag; 7394 uint16_t abrt_iotag; 7395 struct lpfc_iocbq *abrtiocbq; 7396 struct ulp_bde64 *bpl = NULL; 7397 uint32_t els_id = LPFC_ELS_ID_DEFAULT; 7398 int numBdes, i; 7399 struct ulp_bde64 bde; 7400 struct lpfc_nodelist *ndlp; 7401 7402 fip = phba->hba_flag & HBA_FIP_SUPPORT; 7403 /* The fcp commands will set command type */ 7404 if (iocbq->iocb_flag & LPFC_IO_FCP) 7405 command_type = FCP_COMMAND; 7406 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)) 7407 command_type = ELS_COMMAND_FIP; 7408 else 7409 command_type = ELS_COMMAND_NON_FIP; 7410 7411 /* Some of the fields are in the right position already */ 7412 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe)); 7413 abort_tag = (uint32_t) iocbq->iotag; 7414 xritag = iocbq->sli4_xritag; 7415 wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */ 7416 /* words0-2 bpl convert bde */ 7417 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { 7418 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / 7419 sizeof(struct ulp_bde64); 7420 bpl = (struct ulp_bde64 *) 7421 ((struct lpfc_dmabuf *)iocbq->context3)->virt; 7422 if (!bpl) 7423 return IOCB_ERROR; 7424 7425 /* Should already be byte swapped. */ 7426 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh); 7427 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow); 7428 /* swap the size field back to the cpu so we 7429 * can assign it to the sgl. 7430 */ 7431 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w); 7432 xmit_len = wqe->generic.bde.tus.f.bdeSize; 7433 total_len = 0; 7434 for (i = 0; i < numBdes; i++) { 7435 bde.tus.w = le32_to_cpu(bpl[i].tus.w); 7436 total_len += bde.tus.f.bdeSize; 7437 } 7438 } else 7439 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize; 7440 7441 iocbq->iocb.ulpIoTag = iocbq->iotag; 7442 cmnd = iocbq->iocb.ulpCommand; 7443 7444 switch (iocbq->iocb.ulpCommand) { 7445 case CMD_ELS_REQUEST64_CR: 7446 ndlp = (struct lpfc_nodelist *)iocbq->context1; 7447 if (!iocbq->iocb.ulpLe) { 7448 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 7449 "2007 Only Limited Edition cmd Format" 7450 " supported 0x%x\n", 7451 iocbq->iocb.ulpCommand); 7452 return IOCB_ERROR; 7453 } 7454 wqe->els_req.payload_len = xmit_len; 7455 /* Els_reguest64 has a TMO */ 7456 bf_set(wqe_tmo, &wqe->els_req.wqe_com, 7457 iocbq->iocb.ulpTimeout); 7458 /* Need a VF for word 4 set the vf bit*/ 7459 bf_set(els_req64_vf, &wqe->els_req, 0); 7460 /* And a VFID for word 12 */ 7461 bf_set(els_req64_vfid, &wqe->els_req, 0); 7462 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); 7463 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 7464 iocbq->iocb.ulpContext); 7465 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct); 7466 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0); 7467 /* CCP CCPE PV PRI in word10 were set in the memcpy */ 7468 if (command_type == ELS_COMMAND_FIP) { 7469 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK) 7470 >> LPFC_FIP_ELS_ID_SHIFT); 7471 } 7472 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com, 7473 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 7474 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id); 7475 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1); 7476 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ); 7477 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1); 7478 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE); 7479 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0); 7480 break; 7481 case CMD_XMIT_SEQUENCE64_CX: 7482 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, 7483 iocbq->iocb.un.ulpWord[3]); 7484 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, 7485 iocbq->iocb.unsli3.rcvsli3.ox_id); 7486 /* The entire sequence is transmitted for this IOCB */ 7487 xmit_len = total_len; 7488 cmnd = CMD_XMIT_SEQUENCE64_CR; 7489 case CMD_XMIT_SEQUENCE64_CR: 7490 /* word3 iocb=io_tag32 wqe=reserved */ 7491 wqe->xmit_sequence.rsvd3 = 0; 7492 /* word4 relative_offset memcpy */ 7493 /* word5 r_ctl/df_ctl memcpy */ 7494 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0); 7495 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1); 7496 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, 7497 LPFC_WQE_IOD_WRITE); 7498 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com, 7499 LPFC_WQE_LENLOC_WORD12); 7500 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0); 7501 wqe->xmit_sequence.xmit_len = xmit_len; 7502 command_type = OTHER_COMMAND; 7503 break; 7504 case CMD_XMIT_BCAST64_CN: 7505 /* word3 iocb=iotag32 wqe=seq_payload_len */ 7506 wqe->xmit_bcast64.seq_payload_len = xmit_len; 7507 /* word4 iocb=rsvd wqe=rsvd */ 7508 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */ 7509 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */ 7510 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com, 7511 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 7512 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1); 7513 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE); 7514 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com, 7515 LPFC_WQE_LENLOC_WORD3); 7516 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0); 7517 break; 7518 case CMD_FCP_IWRITE64_CR: 7519 command_type = FCP_COMMAND_DATA_OUT; 7520 /* word3 iocb=iotag wqe=payload_offset_len */ 7521 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ 7522 wqe->fcp_iwrite.payload_offset_len = 7523 xmit_len + sizeof(struct fcp_rsp); 7524 /* word4 iocb=parameter wqe=total_xfer_length memcpy */ 7525 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */ 7526 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com, 7527 iocbq->iocb.ulpFCP2Rcvy); 7528 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS); 7529 /* Always open the exchange */ 7530 bf_set(wqe_xc, &wqe->fcp_iwrite.wqe_com, 0); 7531 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1); 7532 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE); 7533 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, 7534 LPFC_WQE_LENLOC_WORD4); 7535 bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0); 7536 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU); 7537 break; 7538 case CMD_FCP_IREAD64_CR: 7539 /* word3 iocb=iotag wqe=payload_offset_len */ 7540 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ 7541 wqe->fcp_iread.payload_offset_len = 7542 xmit_len + sizeof(struct fcp_rsp); 7543 /* word4 iocb=parameter wqe=total_xfer_length memcpy */ 7544 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */ 7545 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com, 7546 iocbq->iocb.ulpFCP2Rcvy); 7547 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS); 7548 /* Always open the exchange */ 7549 bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0); 7550 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1); 7551 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ); 7552 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, 7553 LPFC_WQE_LENLOC_WORD4); 7554 bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0); 7555 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU); 7556 break; 7557 case CMD_FCP_ICMND64_CR: 7558 /* word3 iocb=IO_TAG wqe=reserved */ 7559 wqe->fcp_icmd.rsrvd3 = 0; 7560 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0); 7561 /* Always open the exchange */ 7562 bf_set(wqe_xc, &wqe->fcp_icmd.wqe_com, 0); 7563 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1); 7564 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE); 7565 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1); 7566 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, 7567 LPFC_WQE_LENLOC_NONE); 7568 bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0); 7569 break; 7570 case CMD_GEN_REQUEST64_CR: 7571 /* For this command calculate the xmit length of the 7572 * request bde. 7573 */ 7574 xmit_len = 0; 7575 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / 7576 sizeof(struct ulp_bde64); 7577 for (i = 0; i < numBdes; i++) { 7578 bde.tus.w = le32_to_cpu(bpl[i].tus.w); 7579 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64) 7580 break; 7581 xmit_len += bde.tus.f.bdeSize; 7582 } 7583 /* word3 iocb=IO_TAG wqe=request_payload_len */ 7584 wqe->gen_req.request_payload_len = xmit_len; 7585 /* word4 iocb=parameter wqe=relative_offset memcpy */ 7586 /* word5 [rctl, type, df_ctl, la] copied in memcpy */ 7587 /* word6 context tag copied in memcpy */ 7588 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) { 7589 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); 7590 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 7591 "2015 Invalid CT %x command 0x%x\n", 7592 ct, iocbq->iocb.ulpCommand); 7593 return IOCB_ERROR; 7594 } 7595 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0); 7596 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout); 7597 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU); 7598 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1); 7599 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ); 7600 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1); 7601 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE); 7602 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0); 7603 command_type = OTHER_COMMAND; 7604 break; 7605 case CMD_XMIT_ELS_RSP64_CX: 7606 ndlp = (struct lpfc_nodelist *)iocbq->context1; 7607 /* words0-2 BDE memcpy */ 7608 /* word3 iocb=iotag32 wqe=response_payload_len */ 7609 wqe->xmit_els_rsp.response_payload_len = xmit_len; 7610 /* word4 iocb=did wge=rsvd. */ 7611 wqe->xmit_els_rsp.rsvd4 = 0; 7612 /* word5 iocb=rsvd wge=did */ 7613 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest, 7614 iocbq->iocb.un.elsreq64.remoteID); 7615 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 7616 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 7617 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU); 7618 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 7619 iocbq->iocb.unsli3.rcvsli3.ox_id); 7620 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l) 7621 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 7622 phba->vpi_ids[iocbq->vport->vpi]); 7623 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1); 7624 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE); 7625 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1); 7626 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com, 7627 LPFC_WQE_LENLOC_WORD3); 7628 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0); 7629 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp, 7630 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 7631 command_type = OTHER_COMMAND; 7632 break; 7633 case CMD_CLOSE_XRI_CN: 7634 case CMD_ABORT_XRI_CN: 7635 case CMD_ABORT_XRI_CX: 7636 /* words 0-2 memcpy should be 0 rserved */ 7637 /* port will send abts */ 7638 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag; 7639 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) { 7640 abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag]; 7641 fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK; 7642 } else 7643 fip = 0; 7644 7645 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip) 7646 /* 7647 * The link is down, or the command was ELS_FIP 7648 * so the fw does not need to send abts 7649 * on the wire. 7650 */ 7651 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1); 7652 else 7653 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0); 7654 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG); 7655 /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */ 7656 wqe->abort_cmd.rsrvd5 = 0; 7657 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com, 7658 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 7659 abort_tag = iocbq->iocb.un.acxri.abortIoTag; 7660 /* 7661 * The abort handler will send us CMD_ABORT_XRI_CN or 7662 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX 7663 */ 7664 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX); 7665 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1); 7666 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com, 7667 LPFC_WQE_LENLOC_NONE); 7668 cmnd = CMD_ABORT_XRI_CX; 7669 command_type = OTHER_COMMAND; 7670 xritag = 0; 7671 break; 7672 case CMD_XMIT_BLS_RSP64_CX: 7673 /* As BLS ABTS RSP WQE is very different from other WQEs, 7674 * we re-construct this WQE here based on information in 7675 * iocbq from scratch. 7676 */ 7677 memset(wqe, 0, sizeof(union lpfc_wqe)); 7678 /* OX_ID is invariable to who sent ABTS to CT exchange */ 7679 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp, 7680 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp)); 7681 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) == 7682 LPFC_ABTS_UNSOL_INT) { 7683 /* ABTS sent by initiator to CT exchange, the 7684 * RX_ID field will be filled with the newly 7685 * allocated responder XRI. 7686 */ 7687 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp, 7688 iocbq->sli4_xritag); 7689 } else { 7690 /* ABTS sent by responder to CT exchange, the 7691 * RX_ID field will be filled with the responder 7692 * RX_ID from ABTS. 7693 */ 7694 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp, 7695 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp)); 7696 } 7697 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff); 7698 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1); 7699 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com, 7700 iocbq->iocb.ulpContext); 7701 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1); 7702 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com, 7703 LPFC_WQE_LENLOC_NONE); 7704 /* Overwrite the pre-set comnd type with OTHER_COMMAND */ 7705 command_type = OTHER_COMMAND; 7706 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) { 7707 bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp, 7708 bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp)); 7709 bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp, 7710 bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp)); 7711 bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp, 7712 bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp)); 7713 } 7714 7715 break; 7716 case CMD_XRI_ABORTED_CX: 7717 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */ 7718 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */ 7719 case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */ 7720 case CMD_FCP_TRSP64_CX: /* Target mode rcv */ 7721 case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */ 7722 default: 7723 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 7724 "2014 Invalid command 0x%x\n", 7725 iocbq->iocb.ulpCommand); 7726 return IOCB_ERROR; 7727 break; 7728 } 7729 7730 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag); 7731 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag); 7732 wqe->generic.wqe_com.abort_tag = abort_tag; 7733 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type); 7734 bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd); 7735 bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass); 7736 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); 7737 return 0; 7738 } 7739 7740 /** 7741 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb 7742 * @phba: Pointer to HBA context object. 7743 * @ring_number: SLI ring number to issue iocb on. 7744 * @piocb: Pointer to command iocb. 7745 * @flag: Flag indicating if this command can be put into txq. 7746 * 7747 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue 7748 * an iocb command to an HBA with SLI-4 interface spec. 7749 * 7750 * This function is called with hbalock held. The function will return success 7751 * after it successfully submit the iocb to firmware or after adding to the 7752 * txq. 7753 **/ 7754 static int 7755 __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, 7756 struct lpfc_iocbq *piocb, uint32_t flag) 7757 { 7758 struct lpfc_sglq *sglq; 7759 union lpfc_wqe wqe; 7760 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number]; 7761 7762 if (piocb->sli4_xritag == NO_XRI) { 7763 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || 7764 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN || 7765 piocb->iocb.ulpCommand == CMD_XMIT_BLS_RSP64_CX) 7766 sglq = NULL; 7767 else { 7768 if (pring->txq_cnt) { 7769 if (!(flag & SLI_IOCB_RET_IOCB)) { 7770 __lpfc_sli_ringtx_put(phba, 7771 pring, piocb); 7772 return IOCB_SUCCESS; 7773 } else { 7774 return IOCB_BUSY; 7775 } 7776 } else { 7777 sglq = __lpfc_sli_get_sglq(phba, piocb); 7778 if (!sglq) { 7779 if (!(flag & SLI_IOCB_RET_IOCB)) { 7780 __lpfc_sli_ringtx_put(phba, 7781 pring, 7782 piocb); 7783 return IOCB_SUCCESS; 7784 } else 7785 return IOCB_BUSY; 7786 } 7787 } 7788 } 7789 } else if (piocb->iocb_flag & LPFC_IO_FCP) { 7790 /* These IO's already have an XRI and a mapped sgl. */ 7791 sglq = NULL; 7792 } else { 7793 /* 7794 * This is a continuation of a commandi,(CX) so this 7795 * sglq is on the active list 7796 */ 7797 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_xritag); 7798 if (!sglq) 7799 return IOCB_ERROR; 7800 } 7801 7802 if (sglq) { 7803 piocb->sli4_lxritag = sglq->sli4_lxritag; 7804 piocb->sli4_xritag = sglq->sli4_xritag; 7805 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq)) 7806 return IOCB_ERROR; 7807 } 7808 7809 if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe)) 7810 return IOCB_ERROR; 7811 7812 if ((piocb->iocb_flag & LPFC_IO_FCP) || 7813 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) { 7814 /* 7815 * For FCP command IOCB, get a new WQ index to distribute 7816 * WQE across the WQsr. On the other hand, for abort IOCB, 7817 * it carries the same WQ index to the original command 7818 * IOCB. 7819 */ 7820 if (piocb->iocb_flag & LPFC_IO_FCP) 7821 piocb->fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba); 7822 if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx], 7823 &wqe)) 7824 return IOCB_ERROR; 7825 } else { 7826 if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe)) 7827 return IOCB_ERROR; 7828 } 7829 lpfc_sli_ringtxcmpl_put(phba, pring, piocb); 7830 7831 return 0; 7832 } 7833 7834 /** 7835 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb 7836 * 7837 * This routine wraps the actual lockless version for issusing IOCB function 7838 * pointer from the lpfc_hba struct. 7839 * 7840 * Return codes: 7841 * IOCB_ERROR - Error 7842 * IOCB_SUCCESS - Success 7843 * IOCB_BUSY - Busy 7844 **/ 7845 int 7846 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, 7847 struct lpfc_iocbq *piocb, uint32_t flag) 7848 { 7849 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); 7850 } 7851 7852 /** 7853 * lpfc_sli_api_table_setup - Set up sli api function jump table 7854 * @phba: The hba struct for which this call is being executed. 7855 * @dev_grp: The HBA PCI-Device group number. 7856 * 7857 * This routine sets up the SLI interface API function jump table in @phba 7858 * struct. 7859 * Returns: 0 - success, -ENODEV - failure. 7860 **/ 7861 int 7862 lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 7863 { 7864 7865 switch (dev_grp) { 7866 case LPFC_PCI_DEV_LP: 7867 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3; 7868 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3; 7869 break; 7870 case LPFC_PCI_DEV_OC: 7871 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4; 7872 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4; 7873 break; 7874 default: 7875 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7876 "1419 Invalid HBA PCI-device group: 0x%x\n", 7877 dev_grp); 7878 return -ENODEV; 7879 break; 7880 } 7881 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq; 7882 return 0; 7883 } 7884 7885 /** 7886 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb 7887 * @phba: Pointer to HBA context object. 7888 * @pring: Pointer to driver SLI ring object. 7889 * @piocb: Pointer to command iocb. 7890 * @flag: Flag indicating if this command can be put into txq. 7891 * 7892 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb 7893 * function. This function gets the hbalock and calls 7894 * __lpfc_sli_issue_iocb function and will return the error returned 7895 * by __lpfc_sli_issue_iocb function. This wrapper is used by 7896 * functions which do not hold hbalock. 7897 **/ 7898 int 7899 lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, 7900 struct lpfc_iocbq *piocb, uint32_t flag) 7901 { 7902 unsigned long iflags; 7903 int rc; 7904 7905 spin_lock_irqsave(&phba->hbalock, iflags); 7906 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); 7907 spin_unlock_irqrestore(&phba->hbalock, iflags); 7908 7909 return rc; 7910 } 7911 7912 /** 7913 * lpfc_extra_ring_setup - Extra ring setup function 7914 * @phba: Pointer to HBA context object. 7915 * 7916 * This function is called while driver attaches with the 7917 * HBA to setup the extra ring. The extra ring is used 7918 * only when driver needs to support target mode functionality 7919 * or IP over FC functionalities. 7920 * 7921 * This function is called with no lock held. 7922 **/ 7923 static int 7924 lpfc_extra_ring_setup( struct lpfc_hba *phba) 7925 { 7926 struct lpfc_sli *psli; 7927 struct lpfc_sli_ring *pring; 7928 7929 psli = &phba->sli; 7930 7931 /* Adjust cmd/rsp ring iocb entries more evenly */ 7932 7933 /* Take some away from the FCP ring */ 7934 pring = &psli->ring[psli->fcp_ring]; 7935 pring->numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES; 7936 pring->numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES; 7937 pring->numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES; 7938 pring->numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES; 7939 7940 /* and give them to the extra ring */ 7941 pring = &psli->ring[psli->extra_ring]; 7942 7943 pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES; 7944 pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES; 7945 pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES; 7946 pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES; 7947 7948 /* Setup default profile for this ring */ 7949 pring->iotag_max = 4096; 7950 pring->num_mask = 1; 7951 pring->prt[0].profile = 0; /* Mask 0 */ 7952 pring->prt[0].rctl = phba->cfg_multi_ring_rctl; 7953 pring->prt[0].type = phba->cfg_multi_ring_type; 7954 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL; 7955 return 0; 7956 } 7957 7958 /** 7959 * lpfc_sli_async_event_handler - ASYNC iocb handler function 7960 * @phba: Pointer to HBA context object. 7961 * @pring: Pointer to driver SLI ring object. 7962 * @iocbq: Pointer to iocb object. 7963 * 7964 * This function is called by the slow ring event handler 7965 * function when there is an ASYNC event iocb in the ring. 7966 * This function is called with no lock held. 7967 * Currently this function handles only temperature related 7968 * ASYNC events. The function decodes the temperature sensor 7969 * event message and posts events for the management applications. 7970 **/ 7971 static void 7972 lpfc_sli_async_event_handler(struct lpfc_hba * phba, 7973 struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq) 7974 { 7975 IOCB_t *icmd; 7976 uint16_t evt_code; 7977 uint16_t temp; 7978 struct temp_event temp_event_data; 7979 struct Scsi_Host *shost; 7980 uint32_t *iocb_w; 7981 7982 icmd = &iocbq->iocb; 7983 evt_code = icmd->un.asyncstat.evt_code; 7984 temp = icmd->ulpContext; 7985 7986 if ((evt_code != ASYNC_TEMP_WARN) && 7987 (evt_code != ASYNC_TEMP_SAFE)) { 7988 iocb_w = (uint32_t *) icmd; 7989 lpfc_printf_log(phba, 7990 KERN_ERR, 7991 LOG_SLI, 7992 "0346 Ring %d handler: unexpected ASYNC_STATUS" 7993 " evt_code 0x%x\n" 7994 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n" 7995 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n" 7996 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n" 7997 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n", 7998 pring->ringno, 7999 icmd->un.asyncstat.evt_code, 8000 iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3], 8001 iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7], 8002 iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11], 8003 iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]); 8004 8005 return; 8006 } 8007 temp_event_data.data = (uint32_t)temp; 8008 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 8009 if (evt_code == ASYNC_TEMP_WARN) { 8010 temp_event_data.event_code = LPFC_THRESHOLD_TEMP; 8011 lpfc_printf_log(phba, 8012 KERN_ERR, 8013 LOG_TEMP, 8014 "0347 Adapter is very hot, please take " 8015 "corrective action. temperature : %d Celsius\n", 8016 temp); 8017 } 8018 if (evt_code == ASYNC_TEMP_SAFE) { 8019 temp_event_data.event_code = LPFC_NORMAL_TEMP; 8020 lpfc_printf_log(phba, 8021 KERN_ERR, 8022 LOG_TEMP, 8023 "0340 Adapter temperature is OK now. " 8024 "temperature : %d Celsius\n", 8025 temp); 8026 } 8027 8028 /* Send temperature change event to applications */ 8029 shost = lpfc_shost_from_vport(phba->pport); 8030 fc_host_post_vendor_event(shost, fc_get_event_number(), 8031 sizeof(temp_event_data), (char *) &temp_event_data, 8032 LPFC_NL_VENDOR_ID); 8033 8034 } 8035 8036 8037 /** 8038 * lpfc_sli_setup - SLI ring setup function 8039 * @phba: Pointer to HBA context object. 8040 * 8041 * lpfc_sli_setup sets up rings of the SLI interface with 8042 * number of iocbs per ring and iotags. This function is 8043 * called while driver attach to the HBA and before the 8044 * interrupts are enabled. So there is no need for locking. 8045 * 8046 * This function always returns 0. 8047 **/ 8048 int 8049 lpfc_sli_setup(struct lpfc_hba *phba) 8050 { 8051 int i, totiocbsize = 0; 8052 struct lpfc_sli *psli = &phba->sli; 8053 struct lpfc_sli_ring *pring; 8054 8055 psli->num_rings = MAX_CONFIGURED_RINGS; 8056 psli->sli_flag = 0; 8057 psli->fcp_ring = LPFC_FCP_RING; 8058 psli->next_ring = LPFC_FCP_NEXT_RING; 8059 psli->extra_ring = LPFC_EXTRA_RING; 8060 8061 psli->iocbq_lookup = NULL; 8062 psli->iocbq_lookup_len = 0; 8063 psli->last_iotag = 0; 8064 8065 for (i = 0; i < psli->num_rings; i++) { 8066 pring = &psli->ring[i]; 8067 switch (i) { 8068 case LPFC_FCP_RING: /* ring 0 - FCP */ 8069 /* numCiocb and numRiocb are used in config_port */ 8070 pring->numCiocb = SLI2_IOCB_CMD_R0_ENTRIES; 8071 pring->numRiocb = SLI2_IOCB_RSP_R0_ENTRIES; 8072 pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES; 8073 pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES; 8074 pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES; 8075 pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES; 8076 pring->sizeCiocb = (phba->sli_rev == 3) ? 8077 SLI3_IOCB_CMD_SIZE : 8078 SLI2_IOCB_CMD_SIZE; 8079 pring->sizeRiocb = (phba->sli_rev == 3) ? 8080 SLI3_IOCB_RSP_SIZE : 8081 SLI2_IOCB_RSP_SIZE; 8082 pring->iotag_ctr = 0; 8083 pring->iotag_max = 8084 (phba->cfg_hba_queue_depth * 2); 8085 pring->fast_iotag = pring->iotag_max; 8086 pring->num_mask = 0; 8087 break; 8088 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */ 8089 /* numCiocb and numRiocb are used in config_port */ 8090 pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES; 8091 pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES; 8092 pring->sizeCiocb = (phba->sli_rev == 3) ? 8093 SLI3_IOCB_CMD_SIZE : 8094 SLI2_IOCB_CMD_SIZE; 8095 pring->sizeRiocb = (phba->sli_rev == 3) ? 8096 SLI3_IOCB_RSP_SIZE : 8097 SLI2_IOCB_RSP_SIZE; 8098 pring->iotag_max = phba->cfg_hba_queue_depth; 8099 pring->num_mask = 0; 8100 break; 8101 case LPFC_ELS_RING: /* ring 2 - ELS / CT */ 8102 /* numCiocb and numRiocb are used in config_port */ 8103 pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES; 8104 pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES; 8105 pring->sizeCiocb = (phba->sli_rev == 3) ? 8106 SLI3_IOCB_CMD_SIZE : 8107 SLI2_IOCB_CMD_SIZE; 8108 pring->sizeRiocb = (phba->sli_rev == 3) ? 8109 SLI3_IOCB_RSP_SIZE : 8110 SLI2_IOCB_RSP_SIZE; 8111 pring->fast_iotag = 0; 8112 pring->iotag_ctr = 0; 8113 pring->iotag_max = 4096; 8114 pring->lpfc_sli_rcv_async_status = 8115 lpfc_sli_async_event_handler; 8116 pring->num_mask = LPFC_MAX_RING_MASK; 8117 pring->prt[0].profile = 0; /* Mask 0 */ 8118 pring->prt[0].rctl = FC_RCTL_ELS_REQ; 8119 pring->prt[0].type = FC_TYPE_ELS; 8120 pring->prt[0].lpfc_sli_rcv_unsol_event = 8121 lpfc_els_unsol_event; 8122 pring->prt[1].profile = 0; /* Mask 1 */ 8123 pring->prt[1].rctl = FC_RCTL_ELS_REP; 8124 pring->prt[1].type = FC_TYPE_ELS; 8125 pring->prt[1].lpfc_sli_rcv_unsol_event = 8126 lpfc_els_unsol_event; 8127 pring->prt[2].profile = 0; /* Mask 2 */ 8128 /* NameServer Inquiry */ 8129 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL; 8130 /* NameServer */ 8131 pring->prt[2].type = FC_TYPE_CT; 8132 pring->prt[2].lpfc_sli_rcv_unsol_event = 8133 lpfc_ct_unsol_event; 8134 pring->prt[3].profile = 0; /* Mask 3 */ 8135 /* NameServer response */ 8136 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL; 8137 /* NameServer */ 8138 pring->prt[3].type = FC_TYPE_CT; 8139 pring->prt[3].lpfc_sli_rcv_unsol_event = 8140 lpfc_ct_unsol_event; 8141 /* abort unsolicited sequence */ 8142 pring->prt[4].profile = 0; /* Mask 4 */ 8143 pring->prt[4].rctl = FC_RCTL_BA_ABTS; 8144 pring->prt[4].type = FC_TYPE_BLS; 8145 pring->prt[4].lpfc_sli_rcv_unsol_event = 8146 lpfc_sli4_ct_abort_unsol_event; 8147 break; 8148 } 8149 totiocbsize += (pring->numCiocb * pring->sizeCiocb) + 8150 (pring->numRiocb * pring->sizeRiocb); 8151 } 8152 if (totiocbsize > MAX_SLIM_IOCB_SIZE) { 8153 /* Too many cmd / rsp ring entries in SLI2 SLIM */ 8154 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in " 8155 "SLI2 SLIM Data: x%x x%lx\n", 8156 phba->brd_no, totiocbsize, 8157 (unsigned long) MAX_SLIM_IOCB_SIZE); 8158 } 8159 if (phba->cfg_multi_ring_support == 2) 8160 lpfc_extra_ring_setup(phba); 8161 8162 return 0; 8163 } 8164 8165 /** 8166 * lpfc_sli_queue_setup - Queue initialization function 8167 * @phba: Pointer to HBA context object. 8168 * 8169 * lpfc_sli_queue_setup sets up mailbox queues and iocb queues for each 8170 * ring. This function also initializes ring indices of each ring. 8171 * This function is called during the initialization of the SLI 8172 * interface of an HBA. 8173 * This function is called with no lock held and always returns 8174 * 1. 8175 **/ 8176 int 8177 lpfc_sli_queue_setup(struct lpfc_hba *phba) 8178 { 8179 struct lpfc_sli *psli; 8180 struct lpfc_sli_ring *pring; 8181 int i; 8182 8183 psli = &phba->sli; 8184 spin_lock_irq(&phba->hbalock); 8185 INIT_LIST_HEAD(&psli->mboxq); 8186 INIT_LIST_HEAD(&psli->mboxq_cmpl); 8187 /* Initialize list headers for txq and txcmplq as double linked lists */ 8188 for (i = 0; i < psli->num_rings; i++) { 8189 pring = &psli->ring[i]; 8190 pring->ringno = i; 8191 pring->next_cmdidx = 0; 8192 pring->local_getidx = 0; 8193 pring->cmdidx = 0; 8194 INIT_LIST_HEAD(&pring->txq); 8195 INIT_LIST_HEAD(&pring->txcmplq); 8196 INIT_LIST_HEAD(&pring->iocb_continueq); 8197 INIT_LIST_HEAD(&pring->iocb_continue_saveq); 8198 INIT_LIST_HEAD(&pring->postbufq); 8199 } 8200 spin_unlock_irq(&phba->hbalock); 8201 return 1; 8202 } 8203 8204 /** 8205 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system 8206 * @phba: Pointer to HBA context object. 8207 * 8208 * This routine flushes the mailbox command subsystem. It will unconditionally 8209 * flush all the mailbox commands in the three possible stages in the mailbox 8210 * command sub-system: pending mailbox command queue; the outstanding mailbox 8211 * command; and completed mailbox command queue. It is caller's responsibility 8212 * to make sure that the driver is in the proper state to flush the mailbox 8213 * command sub-system. Namely, the posting of mailbox commands into the 8214 * pending mailbox command queue from the various clients must be stopped; 8215 * either the HBA is in a state that it will never works on the outstanding 8216 * mailbox command (such as in EEH or ERATT conditions) or the outstanding 8217 * mailbox command has been completed. 8218 **/ 8219 static void 8220 lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba) 8221 { 8222 LIST_HEAD(completions); 8223 struct lpfc_sli *psli = &phba->sli; 8224 LPFC_MBOXQ_t *pmb; 8225 unsigned long iflag; 8226 8227 /* Flush all the mailbox commands in the mbox system */ 8228 spin_lock_irqsave(&phba->hbalock, iflag); 8229 /* The pending mailbox command queue */ 8230 list_splice_init(&phba->sli.mboxq, &completions); 8231 /* The outstanding active mailbox command */ 8232 if (psli->mbox_active) { 8233 list_add_tail(&psli->mbox_active->list, &completions); 8234 psli->mbox_active = NULL; 8235 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8236 } 8237 /* The completed mailbox command queue */ 8238 list_splice_init(&phba->sli.mboxq_cmpl, &completions); 8239 spin_unlock_irqrestore(&phba->hbalock, iflag); 8240 8241 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */ 8242 while (!list_empty(&completions)) { 8243 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list); 8244 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED; 8245 if (pmb->mbox_cmpl) 8246 pmb->mbox_cmpl(phba, pmb); 8247 } 8248 } 8249 8250 /** 8251 * lpfc_sli_host_down - Vport cleanup function 8252 * @vport: Pointer to virtual port object. 8253 * 8254 * lpfc_sli_host_down is called to clean up the resources 8255 * associated with a vport before destroying virtual 8256 * port data structures. 8257 * This function does following operations: 8258 * - Free discovery resources associated with this virtual 8259 * port. 8260 * - Free iocbs associated with this virtual port in 8261 * the txq. 8262 * - Send abort for all iocb commands associated with this 8263 * vport in txcmplq. 8264 * 8265 * This function is called with no lock held and always returns 1. 8266 **/ 8267 int 8268 lpfc_sli_host_down(struct lpfc_vport *vport) 8269 { 8270 LIST_HEAD(completions); 8271 struct lpfc_hba *phba = vport->phba; 8272 struct lpfc_sli *psli = &phba->sli; 8273 struct lpfc_sli_ring *pring; 8274 struct lpfc_iocbq *iocb, *next_iocb; 8275 int i; 8276 unsigned long flags = 0; 8277 uint16_t prev_pring_flag; 8278 8279 lpfc_cleanup_discovery_resources(vport); 8280 8281 spin_lock_irqsave(&phba->hbalock, flags); 8282 for (i = 0; i < psli->num_rings; i++) { 8283 pring = &psli->ring[i]; 8284 prev_pring_flag = pring->flag; 8285 /* Only slow rings */ 8286 if (pring->ringno == LPFC_ELS_RING) { 8287 pring->flag |= LPFC_DEFERRED_RING_EVENT; 8288 /* Set the lpfc data pending flag */ 8289 set_bit(LPFC_DATA_READY, &phba->data_flags); 8290 } 8291 /* 8292 * Error everything on the txq since these iocbs have not been 8293 * given to the FW yet. 8294 */ 8295 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { 8296 if (iocb->vport != vport) 8297 continue; 8298 list_move_tail(&iocb->list, &completions); 8299 pring->txq_cnt--; 8300 } 8301 8302 /* Next issue ABTS for everything on the txcmplq */ 8303 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, 8304 list) { 8305 if (iocb->vport != vport) 8306 continue; 8307 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 8308 } 8309 8310 pring->flag = prev_pring_flag; 8311 } 8312 8313 spin_unlock_irqrestore(&phba->hbalock, flags); 8314 8315 /* Cancel all the IOCBs from the completions list */ 8316 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 8317 IOERR_SLI_DOWN); 8318 return 1; 8319 } 8320 8321 /** 8322 * lpfc_sli_hba_down - Resource cleanup function for the HBA 8323 * @phba: Pointer to HBA context object. 8324 * 8325 * This function cleans up all iocb, buffers, mailbox commands 8326 * while shutting down the HBA. This function is called with no 8327 * lock held and always returns 1. 8328 * This function does the following to cleanup driver resources: 8329 * - Free discovery resources for each virtual port 8330 * - Cleanup any pending fabric iocbs 8331 * - Iterate through the iocb txq and free each entry 8332 * in the list. 8333 * - Free up any buffer posted to the HBA 8334 * - Free mailbox commands in the mailbox queue. 8335 **/ 8336 int 8337 lpfc_sli_hba_down(struct lpfc_hba *phba) 8338 { 8339 LIST_HEAD(completions); 8340 struct lpfc_sli *psli = &phba->sli; 8341 struct lpfc_sli_ring *pring; 8342 struct lpfc_dmabuf *buf_ptr; 8343 unsigned long flags = 0; 8344 int i; 8345 8346 /* Shutdown the mailbox command sub-system */ 8347 lpfc_sli_mbox_sys_shutdown(phba); 8348 8349 lpfc_hba_down_prep(phba); 8350 8351 lpfc_fabric_abort_hba(phba); 8352 8353 spin_lock_irqsave(&phba->hbalock, flags); 8354 for (i = 0; i < psli->num_rings; i++) { 8355 pring = &psli->ring[i]; 8356 /* Only slow rings */ 8357 if (pring->ringno == LPFC_ELS_RING) { 8358 pring->flag |= LPFC_DEFERRED_RING_EVENT; 8359 /* Set the lpfc data pending flag */ 8360 set_bit(LPFC_DATA_READY, &phba->data_flags); 8361 } 8362 8363 /* 8364 * Error everything on the txq since these iocbs have not been 8365 * given to the FW yet. 8366 */ 8367 list_splice_init(&pring->txq, &completions); 8368 pring->txq_cnt = 0; 8369 8370 } 8371 spin_unlock_irqrestore(&phba->hbalock, flags); 8372 8373 /* Cancel all the IOCBs from the completions list */ 8374 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 8375 IOERR_SLI_DOWN); 8376 8377 spin_lock_irqsave(&phba->hbalock, flags); 8378 list_splice_init(&phba->elsbuf, &completions); 8379 phba->elsbuf_cnt = 0; 8380 phba->elsbuf_prev_cnt = 0; 8381 spin_unlock_irqrestore(&phba->hbalock, flags); 8382 8383 while (!list_empty(&completions)) { 8384 list_remove_head(&completions, buf_ptr, 8385 struct lpfc_dmabuf, list); 8386 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 8387 kfree(buf_ptr); 8388 } 8389 8390 /* Return any active mbox cmds */ 8391 del_timer_sync(&psli->mbox_tmo); 8392 8393 spin_lock_irqsave(&phba->pport->work_port_lock, flags); 8394 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 8395 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags); 8396 8397 return 1; 8398 } 8399 8400 /** 8401 * lpfc_sli_pcimem_bcopy - SLI memory copy function 8402 * @srcp: Source memory pointer. 8403 * @destp: Destination memory pointer. 8404 * @cnt: Number of words required to be copied. 8405 * 8406 * This function is used for copying data between driver memory 8407 * and the SLI memory. This function also changes the endianness 8408 * of each word if native endianness is different from SLI 8409 * endianness. This function can be called with or without 8410 * lock. 8411 **/ 8412 void 8413 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt) 8414 { 8415 uint32_t *src = srcp; 8416 uint32_t *dest = destp; 8417 uint32_t ldata; 8418 int i; 8419 8420 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) { 8421 ldata = *src; 8422 ldata = le32_to_cpu(ldata); 8423 *dest = ldata; 8424 src++; 8425 dest++; 8426 } 8427 } 8428 8429 8430 /** 8431 * lpfc_sli_bemem_bcopy - SLI memory copy function 8432 * @srcp: Source memory pointer. 8433 * @destp: Destination memory pointer. 8434 * @cnt: Number of words required to be copied. 8435 * 8436 * This function is used for copying data between a data structure 8437 * with big endian representation to local endianness. 8438 * This function can be called with or without lock. 8439 **/ 8440 void 8441 lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt) 8442 { 8443 uint32_t *src = srcp; 8444 uint32_t *dest = destp; 8445 uint32_t ldata; 8446 int i; 8447 8448 for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) { 8449 ldata = *src; 8450 ldata = be32_to_cpu(ldata); 8451 *dest = ldata; 8452 src++; 8453 dest++; 8454 } 8455 } 8456 8457 /** 8458 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq 8459 * @phba: Pointer to HBA context object. 8460 * @pring: Pointer to driver SLI ring object. 8461 * @mp: Pointer to driver buffer object. 8462 * 8463 * This function is called with no lock held. 8464 * It always return zero after adding the buffer to the postbufq 8465 * buffer list. 8466 **/ 8467 int 8468 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 8469 struct lpfc_dmabuf *mp) 8470 { 8471 /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up 8472 later */ 8473 spin_lock_irq(&phba->hbalock); 8474 list_add_tail(&mp->list, &pring->postbufq); 8475 pring->postbufq_cnt++; 8476 spin_unlock_irq(&phba->hbalock); 8477 return 0; 8478 } 8479 8480 /** 8481 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer 8482 * @phba: Pointer to HBA context object. 8483 * 8484 * When HBQ is enabled, buffers are searched based on tags. This function 8485 * allocates a tag for buffer posted using CMD_QUE_XRI64_CX iocb. The 8486 * tag is bit wise or-ed with QUE_BUFTAG_BIT to make sure that the tag 8487 * does not conflict with tags of buffer posted for unsolicited events. 8488 * The function returns the allocated tag. The function is called with 8489 * no locks held. 8490 **/ 8491 uint32_t 8492 lpfc_sli_get_buffer_tag(struct lpfc_hba *phba) 8493 { 8494 spin_lock_irq(&phba->hbalock); 8495 phba->buffer_tag_count++; 8496 /* 8497 * Always set the QUE_BUFTAG_BIT to distiguish between 8498 * a tag assigned by HBQ. 8499 */ 8500 phba->buffer_tag_count |= QUE_BUFTAG_BIT; 8501 spin_unlock_irq(&phba->hbalock); 8502 return phba->buffer_tag_count; 8503 } 8504 8505 /** 8506 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag 8507 * @phba: Pointer to HBA context object. 8508 * @pring: Pointer to driver SLI ring object. 8509 * @tag: Buffer tag. 8510 * 8511 * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq 8512 * list. After HBA DMA data to these buffers, CMD_IOCB_RET_XRI64_CX 8513 * iocb is posted to the response ring with the tag of the buffer. 8514 * This function searches the pring->postbufq list using the tag 8515 * to find buffer associated with CMD_IOCB_RET_XRI64_CX 8516 * iocb. If the buffer is found then lpfc_dmabuf object of the 8517 * buffer is returned to the caller else NULL is returned. 8518 * This function is called with no lock held. 8519 **/ 8520 struct lpfc_dmabuf * 8521 lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 8522 uint32_t tag) 8523 { 8524 struct lpfc_dmabuf *mp, *next_mp; 8525 struct list_head *slp = &pring->postbufq; 8526 8527 /* Search postbufq, from the beginning, looking for a match on tag */ 8528 spin_lock_irq(&phba->hbalock); 8529 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 8530 if (mp->buffer_tag == tag) { 8531 list_del_init(&mp->list); 8532 pring->postbufq_cnt--; 8533 spin_unlock_irq(&phba->hbalock); 8534 return mp; 8535 } 8536 } 8537 8538 spin_unlock_irq(&phba->hbalock); 8539 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8540 "0402 Cannot find virtual addr for buffer tag on " 8541 "ring %d Data x%lx x%p x%p x%x\n", 8542 pring->ringno, (unsigned long) tag, 8543 slp->next, slp->prev, pring->postbufq_cnt); 8544 8545 return NULL; 8546 } 8547 8548 /** 8549 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events 8550 * @phba: Pointer to HBA context object. 8551 * @pring: Pointer to driver SLI ring object. 8552 * @phys: DMA address of the buffer. 8553 * 8554 * This function searches the buffer list using the dma_address 8555 * of unsolicited event to find the driver's lpfc_dmabuf object 8556 * corresponding to the dma_address. The function returns the 8557 * lpfc_dmabuf object if a buffer is found else it returns NULL. 8558 * This function is called by the ct and els unsolicited event 8559 * handlers to get the buffer associated with the unsolicited 8560 * event. 8561 * 8562 * This function is called with no lock held. 8563 **/ 8564 struct lpfc_dmabuf * 8565 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 8566 dma_addr_t phys) 8567 { 8568 struct lpfc_dmabuf *mp, *next_mp; 8569 struct list_head *slp = &pring->postbufq; 8570 8571 /* Search postbufq, from the beginning, looking for a match on phys */ 8572 spin_lock_irq(&phba->hbalock); 8573 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 8574 if (mp->phys == phys) { 8575 list_del_init(&mp->list); 8576 pring->postbufq_cnt--; 8577 spin_unlock_irq(&phba->hbalock); 8578 return mp; 8579 } 8580 } 8581 8582 spin_unlock_irq(&phba->hbalock); 8583 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8584 "0410 Cannot find virtual addr for mapped buf on " 8585 "ring %d Data x%llx x%p x%p x%x\n", 8586 pring->ringno, (unsigned long long)phys, 8587 slp->next, slp->prev, pring->postbufq_cnt); 8588 return NULL; 8589 } 8590 8591 /** 8592 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs 8593 * @phba: Pointer to HBA context object. 8594 * @cmdiocb: Pointer to driver command iocb object. 8595 * @rspiocb: Pointer to driver response iocb object. 8596 * 8597 * This function is the completion handler for the abort iocbs for 8598 * ELS commands. This function is called from the ELS ring event 8599 * handler with no lock held. This function frees memory resources 8600 * associated with the abort iocb. 8601 **/ 8602 static void 8603 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 8604 struct lpfc_iocbq *rspiocb) 8605 { 8606 IOCB_t *irsp = &rspiocb->iocb; 8607 uint16_t abort_iotag, abort_context; 8608 struct lpfc_iocbq *abort_iocb; 8609 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 8610 8611 abort_iocb = NULL; 8612 8613 if (irsp->ulpStatus) { 8614 abort_context = cmdiocb->iocb.un.acxri.abortContextTag; 8615 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag; 8616 8617 spin_lock_irq(&phba->hbalock); 8618 if (phba->sli_rev < LPFC_SLI_REV4) { 8619 if (abort_iotag != 0 && 8620 abort_iotag <= phba->sli.last_iotag) 8621 abort_iocb = 8622 phba->sli.iocbq_lookup[abort_iotag]; 8623 } else 8624 /* For sli4 the abort_tag is the XRI, 8625 * so the abort routine puts the iotag of the iocb 8626 * being aborted in the context field of the abort 8627 * IOCB. 8628 */ 8629 abort_iocb = phba->sli.iocbq_lookup[abort_context]; 8630 8631 /* 8632 * If the iocb is not found in Firmware queue the iocb 8633 * might have completed already. Do not free it again. 8634 */ 8635 if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) { 8636 if (irsp->un.ulpWord[4] != IOERR_NO_XRI) { 8637 spin_unlock_irq(&phba->hbalock); 8638 lpfc_sli_release_iocbq(phba, cmdiocb); 8639 return; 8640 } 8641 /* For SLI4 the ulpContext field for abort IOCB 8642 * holds the iotag of the IOCB being aborted so 8643 * the local abort_context needs to be reset to 8644 * match the aborted IOCBs ulpContext. 8645 */ 8646 if (abort_iocb && phba->sli_rev == LPFC_SLI_REV4) 8647 abort_context = abort_iocb->iocb.ulpContext; 8648 } 8649 8650 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI, 8651 "0327 Cannot abort els iocb %p " 8652 "with tag %x context %x, abort status %x, " 8653 "abort code %x\n", 8654 abort_iocb, abort_iotag, abort_context, 8655 irsp->ulpStatus, irsp->un.ulpWord[4]); 8656 /* 8657 * make sure we have the right iocbq before taking it 8658 * off the txcmplq and try to call completion routine. 8659 */ 8660 if (!abort_iocb || 8661 abort_iocb->iocb.ulpContext != abort_context || 8662 (abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) == 0) 8663 spin_unlock_irq(&phba->hbalock); 8664 else if (phba->sli_rev < LPFC_SLI_REV4) { 8665 /* 8666 * leave the SLI4 aborted command on the txcmplq 8667 * list and the command complete WCQE's XB bit 8668 * will tell whether the SGL (XRI) can be released 8669 * immediately or to the aborted SGL list for the 8670 * following abort XRI from the HBA. 8671 */ 8672 list_del_init(&abort_iocb->list); 8673 if (abort_iocb->iocb_flag & LPFC_IO_ON_Q) { 8674 abort_iocb->iocb_flag &= ~LPFC_IO_ON_Q; 8675 pring->txcmplq_cnt--; 8676 } 8677 8678 /* Firmware could still be in progress of DMAing 8679 * payload, so don't free data buffer till after 8680 * a hbeat. 8681 */ 8682 abort_iocb->iocb_flag |= LPFC_DELAY_MEM_FREE; 8683 abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED; 8684 spin_unlock_irq(&phba->hbalock); 8685 8686 abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT; 8687 abort_iocb->iocb.un.ulpWord[4] = IOERR_ABORT_REQUESTED; 8688 (abort_iocb->iocb_cmpl)(phba, abort_iocb, abort_iocb); 8689 } else 8690 spin_unlock_irq(&phba->hbalock); 8691 } 8692 8693 lpfc_sli_release_iocbq(phba, cmdiocb); 8694 return; 8695 } 8696 8697 /** 8698 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command 8699 * @phba: Pointer to HBA context object. 8700 * @cmdiocb: Pointer to driver command iocb object. 8701 * @rspiocb: Pointer to driver response iocb object. 8702 * 8703 * The function is called from SLI ring event handler with no 8704 * lock held. This function is the completion handler for ELS commands 8705 * which are aborted. The function frees memory resources used for 8706 * the aborted ELS commands. 8707 **/ 8708 static void 8709 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 8710 struct lpfc_iocbq *rspiocb) 8711 { 8712 IOCB_t *irsp = &rspiocb->iocb; 8713 8714 /* ELS cmd tag <ulpIoTag> completes */ 8715 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 8716 "0139 Ignoring ELS cmd tag x%x completion Data: " 8717 "x%x x%x x%x\n", 8718 irsp->ulpIoTag, irsp->ulpStatus, 8719 irsp->un.ulpWord[4], irsp->ulpTimeout); 8720 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) 8721 lpfc_ct_free_iocb(phba, cmdiocb); 8722 else 8723 lpfc_els_free_iocb(phba, cmdiocb); 8724 return; 8725 } 8726 8727 /** 8728 * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb 8729 * @phba: Pointer to HBA context object. 8730 * @pring: Pointer to driver SLI ring object. 8731 * @cmdiocb: Pointer to driver command iocb object. 8732 * 8733 * This function issues an abort iocb for the provided command iocb down to 8734 * the port. Other than the case the outstanding command iocb is an abort 8735 * request, this function issues abort out unconditionally. This function is 8736 * called with hbalock held. The function returns 0 when it fails due to 8737 * memory allocation failure or when the command iocb is an abort request. 8738 **/ 8739 static int 8740 lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 8741 struct lpfc_iocbq *cmdiocb) 8742 { 8743 struct lpfc_vport *vport = cmdiocb->vport; 8744 struct lpfc_iocbq *abtsiocbp; 8745 IOCB_t *icmd = NULL; 8746 IOCB_t *iabt = NULL; 8747 int retval; 8748 8749 /* 8750 * There are certain command types we don't want to abort. And we 8751 * don't want to abort commands that are already in the process of 8752 * being aborted. 8753 */ 8754 icmd = &cmdiocb->iocb; 8755 if (icmd->ulpCommand == CMD_ABORT_XRI_CN || 8756 icmd->ulpCommand == CMD_CLOSE_XRI_CN || 8757 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0) 8758 return 0; 8759 8760 /* issue ABTS for this IOCB based on iotag */ 8761 abtsiocbp = __lpfc_sli_get_iocbq(phba); 8762 if (abtsiocbp == NULL) 8763 return 0; 8764 8765 /* This signals the response to set the correct status 8766 * before calling the completion handler 8767 */ 8768 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED; 8769 8770 iabt = &abtsiocbp->iocb; 8771 iabt->un.acxri.abortType = ABORT_TYPE_ABTS; 8772 iabt->un.acxri.abortContextTag = icmd->ulpContext; 8773 if (phba->sli_rev == LPFC_SLI_REV4) { 8774 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag; 8775 iabt->un.acxri.abortContextTag = cmdiocb->iotag; 8776 } 8777 else 8778 iabt->un.acxri.abortIoTag = icmd->ulpIoTag; 8779 iabt->ulpLe = 1; 8780 iabt->ulpClass = icmd->ulpClass; 8781 8782 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 8783 abtsiocbp->fcp_wqidx = cmdiocb->fcp_wqidx; 8784 if (cmdiocb->iocb_flag & LPFC_IO_FCP) 8785 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX; 8786 8787 if (phba->link_state >= LPFC_LINK_UP) 8788 iabt->ulpCommand = CMD_ABORT_XRI_CN; 8789 else 8790 iabt->ulpCommand = CMD_CLOSE_XRI_CN; 8791 8792 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl; 8793 8794 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, 8795 "0339 Abort xri x%x, original iotag x%x, " 8796 "abort cmd iotag x%x\n", 8797 iabt->un.acxri.abortIoTag, 8798 iabt->un.acxri.abortContextTag, 8799 abtsiocbp->iotag); 8800 retval = __lpfc_sli_issue_iocb(phba, pring->ringno, abtsiocbp, 0); 8801 8802 if (retval) 8803 __lpfc_sli_release_iocbq(phba, abtsiocbp); 8804 8805 /* 8806 * Caller to this routine should check for IOCB_ERROR 8807 * and handle it properly. This routine no longer removes 8808 * iocb off txcmplq and call compl in case of IOCB_ERROR. 8809 */ 8810 return retval; 8811 } 8812 8813 /** 8814 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb 8815 * @phba: Pointer to HBA context object. 8816 * @pring: Pointer to driver SLI ring object. 8817 * @cmdiocb: Pointer to driver command iocb object. 8818 * 8819 * This function issues an abort iocb for the provided command iocb. In case 8820 * of unloading, the abort iocb will not be issued to commands on the ELS 8821 * ring. Instead, the callback function shall be changed to those commands 8822 * so that nothing happens when them finishes. This function is called with 8823 * hbalock held. The function returns 0 when the command iocb is an abort 8824 * request. 8825 **/ 8826 int 8827 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 8828 struct lpfc_iocbq *cmdiocb) 8829 { 8830 struct lpfc_vport *vport = cmdiocb->vport; 8831 int retval = IOCB_ERROR; 8832 IOCB_t *icmd = NULL; 8833 8834 /* 8835 * There are certain command types we don't want to abort. And we 8836 * don't want to abort commands that are already in the process of 8837 * being aborted. 8838 */ 8839 icmd = &cmdiocb->iocb; 8840 if (icmd->ulpCommand == CMD_ABORT_XRI_CN || 8841 icmd->ulpCommand == CMD_CLOSE_XRI_CN || 8842 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0) 8843 return 0; 8844 8845 /* 8846 * If we're unloading, don't abort iocb on the ELS ring, but change 8847 * the callback so that nothing happens when it finishes. 8848 */ 8849 if ((vport->load_flag & FC_UNLOADING) && 8850 (pring->ringno == LPFC_ELS_RING)) { 8851 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) 8852 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl; 8853 else 8854 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl; 8855 goto abort_iotag_exit; 8856 } 8857 8858 /* Now, we try to issue the abort to the cmdiocb out */ 8859 retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb); 8860 8861 abort_iotag_exit: 8862 /* 8863 * Caller to this routine should check for IOCB_ERROR 8864 * and handle it properly. This routine no longer removes 8865 * iocb off txcmplq and call compl in case of IOCB_ERROR. 8866 */ 8867 return retval; 8868 } 8869 8870 /** 8871 * lpfc_sli_iocb_ring_abort - Unconditionally abort all iocbs on an iocb ring 8872 * @phba: Pointer to HBA context object. 8873 * @pring: Pointer to driver SLI ring object. 8874 * 8875 * This function aborts all iocbs in the given ring and frees all the iocb 8876 * objects in txq. This function issues abort iocbs unconditionally for all 8877 * the iocb commands in txcmplq. The iocbs in the txcmplq is not guaranteed 8878 * to complete before the return of this function. The caller is not required 8879 * to hold any locks. 8880 **/ 8881 static void 8882 lpfc_sli_iocb_ring_abort(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 8883 { 8884 LIST_HEAD(completions); 8885 struct lpfc_iocbq *iocb, *next_iocb; 8886 8887 if (pring->ringno == LPFC_ELS_RING) 8888 lpfc_fabric_abort_hba(phba); 8889 8890 spin_lock_irq(&phba->hbalock); 8891 8892 /* Take off all the iocbs on txq for cancelling */ 8893 list_splice_init(&pring->txq, &completions); 8894 pring->txq_cnt = 0; 8895 8896 /* Next issue ABTS for everything on the txcmplq */ 8897 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) 8898 lpfc_sli_abort_iotag_issue(phba, pring, iocb); 8899 8900 spin_unlock_irq(&phba->hbalock); 8901 8902 /* Cancel all the IOCBs from the completions list */ 8903 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 8904 IOERR_SLI_ABORTED); 8905 } 8906 8907 /** 8908 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba. 8909 * @phba: pointer to lpfc HBA data structure. 8910 * 8911 * This routine will abort all pending and outstanding iocbs to an HBA. 8912 **/ 8913 void 8914 lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba) 8915 { 8916 struct lpfc_sli *psli = &phba->sli; 8917 struct lpfc_sli_ring *pring; 8918 int i; 8919 8920 for (i = 0; i < psli->num_rings; i++) { 8921 pring = &psli->ring[i]; 8922 lpfc_sli_iocb_ring_abort(phba, pring); 8923 } 8924 } 8925 8926 /** 8927 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN 8928 * @iocbq: Pointer to driver iocb object. 8929 * @vport: Pointer to driver virtual port object. 8930 * @tgt_id: SCSI ID of the target. 8931 * @lun_id: LUN ID of the scsi device. 8932 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST 8933 * 8934 * This function acts as an iocb filter for functions which abort or count 8935 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return 8936 * 0 if the filtering criteria is met for the given iocb and will return 8937 * 1 if the filtering criteria is not met. 8938 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the 8939 * given iocb is for the SCSI device specified by vport, tgt_id and 8940 * lun_id parameter. 8941 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the 8942 * given iocb is for the SCSI target specified by vport and tgt_id 8943 * parameters. 8944 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the 8945 * given iocb is for the SCSI host associated with the given vport. 8946 * This function is called with no locks held. 8947 **/ 8948 static int 8949 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport, 8950 uint16_t tgt_id, uint64_t lun_id, 8951 lpfc_ctx_cmd ctx_cmd) 8952 { 8953 struct lpfc_scsi_buf *lpfc_cmd; 8954 int rc = 1; 8955 8956 if (!(iocbq->iocb_flag & LPFC_IO_FCP)) 8957 return rc; 8958 8959 if (iocbq->vport != vport) 8960 return rc; 8961 8962 lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq); 8963 8964 if (lpfc_cmd->pCmd == NULL) 8965 return rc; 8966 8967 switch (ctx_cmd) { 8968 case LPFC_CTX_LUN: 8969 if ((lpfc_cmd->rdata->pnode) && 8970 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) && 8971 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id)) 8972 rc = 0; 8973 break; 8974 case LPFC_CTX_TGT: 8975 if ((lpfc_cmd->rdata->pnode) && 8976 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id)) 8977 rc = 0; 8978 break; 8979 case LPFC_CTX_HOST: 8980 rc = 0; 8981 break; 8982 default: 8983 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n", 8984 __func__, ctx_cmd); 8985 break; 8986 } 8987 8988 return rc; 8989 } 8990 8991 /** 8992 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending 8993 * @vport: Pointer to virtual port. 8994 * @tgt_id: SCSI ID of the target. 8995 * @lun_id: LUN ID of the scsi device. 8996 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. 8997 * 8998 * This function returns number of FCP commands pending for the vport. 8999 * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP 9000 * commands pending on the vport associated with SCSI device specified 9001 * by tgt_id and lun_id parameters. 9002 * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP 9003 * commands pending on the vport associated with SCSI target specified 9004 * by tgt_id parameter. 9005 * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP 9006 * commands pending on the vport. 9007 * This function returns the number of iocbs which satisfy the filter. 9008 * This function is called without any lock held. 9009 **/ 9010 int 9011 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id, 9012 lpfc_ctx_cmd ctx_cmd) 9013 { 9014 struct lpfc_hba *phba = vport->phba; 9015 struct lpfc_iocbq *iocbq; 9016 int sum, i; 9017 9018 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) { 9019 iocbq = phba->sli.iocbq_lookup[i]; 9020 9021 if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id, 9022 ctx_cmd) == 0) 9023 sum++; 9024 } 9025 9026 return sum; 9027 } 9028 9029 /** 9030 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs 9031 * @phba: Pointer to HBA context object 9032 * @cmdiocb: Pointer to command iocb object. 9033 * @rspiocb: Pointer to response iocb object. 9034 * 9035 * This function is called when an aborted FCP iocb completes. This 9036 * function is called by the ring event handler with no lock held. 9037 * This function frees the iocb. 9038 **/ 9039 void 9040 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 9041 struct lpfc_iocbq *rspiocb) 9042 { 9043 lpfc_sli_release_iocbq(phba, cmdiocb); 9044 return; 9045 } 9046 9047 /** 9048 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN 9049 * @vport: Pointer to virtual port. 9050 * @pring: Pointer to driver SLI ring object. 9051 * @tgt_id: SCSI ID of the target. 9052 * @lun_id: LUN ID of the scsi device. 9053 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. 9054 * 9055 * This function sends an abort command for every SCSI command 9056 * associated with the given virtual port pending on the ring 9057 * filtered by lpfc_sli_validate_fcp_iocb function. 9058 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the 9059 * FCP iocbs associated with lun specified by tgt_id and lun_id 9060 * parameters 9061 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the 9062 * FCP iocbs associated with SCSI target specified by tgt_id parameter. 9063 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all 9064 * FCP iocbs associated with virtual port. 9065 * This function returns number of iocbs it failed to abort. 9066 * This function is called with no locks held. 9067 **/ 9068 int 9069 lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, 9070 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd) 9071 { 9072 struct lpfc_hba *phba = vport->phba; 9073 struct lpfc_iocbq *iocbq; 9074 struct lpfc_iocbq *abtsiocb; 9075 IOCB_t *cmd = NULL; 9076 int errcnt = 0, ret_val = 0; 9077 int i; 9078 9079 for (i = 1; i <= phba->sli.last_iotag; i++) { 9080 iocbq = phba->sli.iocbq_lookup[i]; 9081 9082 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id, 9083 abort_cmd) != 0) 9084 continue; 9085 9086 /* issue ABTS for this IOCB based on iotag */ 9087 abtsiocb = lpfc_sli_get_iocbq(phba); 9088 if (abtsiocb == NULL) { 9089 errcnt++; 9090 continue; 9091 } 9092 9093 cmd = &iocbq->iocb; 9094 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; 9095 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext; 9096 if (phba->sli_rev == LPFC_SLI_REV4) 9097 abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag; 9098 else 9099 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag; 9100 abtsiocb->iocb.ulpLe = 1; 9101 abtsiocb->iocb.ulpClass = cmd->ulpClass; 9102 abtsiocb->vport = phba->pport; 9103 9104 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 9105 abtsiocb->fcp_wqidx = iocbq->fcp_wqidx; 9106 if (iocbq->iocb_flag & LPFC_IO_FCP) 9107 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX; 9108 9109 if (lpfc_is_link_up(phba)) 9110 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN; 9111 else 9112 abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN; 9113 9114 /* Setup callback routine and issue the command. */ 9115 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 9116 ret_val = lpfc_sli_issue_iocb(phba, pring->ringno, 9117 abtsiocb, 0); 9118 if (ret_val == IOCB_ERROR) { 9119 lpfc_sli_release_iocbq(phba, abtsiocb); 9120 errcnt++; 9121 continue; 9122 } 9123 } 9124 9125 return errcnt; 9126 } 9127 9128 /** 9129 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler 9130 * @phba: Pointer to HBA context object. 9131 * @cmdiocbq: Pointer to command iocb. 9132 * @rspiocbq: Pointer to response iocb. 9133 * 9134 * This function is the completion handler for iocbs issued using 9135 * lpfc_sli_issue_iocb_wait function. This function is called by the 9136 * ring event handler function without any lock held. This function 9137 * can be called from both worker thread context and interrupt 9138 * context. This function also can be called from other thread which 9139 * cleans up the SLI layer objects. 9140 * This function copy the contents of the response iocb to the 9141 * response iocb memory object provided by the caller of 9142 * lpfc_sli_issue_iocb_wait and then wakes up the thread which 9143 * sleeps for the iocb completion. 9144 **/ 9145 static void 9146 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, 9147 struct lpfc_iocbq *cmdiocbq, 9148 struct lpfc_iocbq *rspiocbq) 9149 { 9150 wait_queue_head_t *pdone_q; 9151 unsigned long iflags; 9152 struct lpfc_scsi_buf *lpfc_cmd; 9153 9154 spin_lock_irqsave(&phba->hbalock, iflags); 9155 cmdiocbq->iocb_flag |= LPFC_IO_WAKE; 9156 if (cmdiocbq->context2 && rspiocbq) 9157 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, 9158 &rspiocbq->iocb, sizeof(IOCB_t)); 9159 9160 /* Set the exchange busy flag for task management commands */ 9161 if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) && 9162 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) { 9163 lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf, 9164 cur_iocbq); 9165 lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY; 9166 } 9167 9168 pdone_q = cmdiocbq->context_un.wait_queue; 9169 if (pdone_q) 9170 wake_up(pdone_q); 9171 spin_unlock_irqrestore(&phba->hbalock, iflags); 9172 return; 9173 } 9174 9175 /** 9176 * lpfc_chk_iocb_flg - Test IOCB flag with lock held. 9177 * @phba: Pointer to HBA context object.. 9178 * @piocbq: Pointer to command iocb. 9179 * @flag: Flag to test. 9180 * 9181 * This routine grabs the hbalock and then test the iocb_flag to 9182 * see if the passed in flag is set. 9183 * Returns: 9184 * 1 if flag is set. 9185 * 0 if flag is not set. 9186 **/ 9187 static int 9188 lpfc_chk_iocb_flg(struct lpfc_hba *phba, 9189 struct lpfc_iocbq *piocbq, uint32_t flag) 9190 { 9191 unsigned long iflags; 9192 int ret; 9193 9194 spin_lock_irqsave(&phba->hbalock, iflags); 9195 ret = piocbq->iocb_flag & flag; 9196 spin_unlock_irqrestore(&phba->hbalock, iflags); 9197 return ret; 9198 9199 } 9200 9201 /** 9202 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands 9203 * @phba: Pointer to HBA context object.. 9204 * @pring: Pointer to sli ring. 9205 * @piocb: Pointer to command iocb. 9206 * @prspiocbq: Pointer to response iocb. 9207 * @timeout: Timeout in number of seconds. 9208 * 9209 * This function issues the iocb to firmware and waits for the 9210 * iocb to complete. If the iocb command is not 9211 * completed within timeout seconds, it returns IOCB_TIMEDOUT. 9212 * Caller should not free the iocb resources if this function 9213 * returns IOCB_TIMEDOUT. 9214 * The function waits for the iocb completion using an 9215 * non-interruptible wait. 9216 * This function will sleep while waiting for iocb completion. 9217 * So, this function should not be called from any context which 9218 * does not allow sleeping. Due to the same reason, this function 9219 * cannot be called with interrupt disabled. 9220 * This function assumes that the iocb completions occur while 9221 * this function sleep. So, this function cannot be called from 9222 * the thread which process iocb completion for this ring. 9223 * This function clears the iocb_flag of the iocb object before 9224 * issuing the iocb and the iocb completion handler sets this 9225 * flag and wakes this thread when the iocb completes. 9226 * The contents of the response iocb will be copied to prspiocbq 9227 * by the completion handler when the command completes. 9228 * This function returns IOCB_SUCCESS when success. 9229 * This function is called with no lock held. 9230 **/ 9231 int 9232 lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, 9233 uint32_t ring_number, 9234 struct lpfc_iocbq *piocb, 9235 struct lpfc_iocbq *prspiocbq, 9236 uint32_t timeout) 9237 { 9238 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q); 9239 long timeleft, timeout_req = 0; 9240 int retval = IOCB_SUCCESS; 9241 uint32_t creg_val; 9242 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 9243 /* 9244 * If the caller has provided a response iocbq buffer, then context2 9245 * is NULL or its an error. 9246 */ 9247 if (prspiocbq) { 9248 if (piocb->context2) 9249 return IOCB_ERROR; 9250 piocb->context2 = prspiocbq; 9251 } 9252 9253 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait; 9254 piocb->context_un.wait_queue = &done_q; 9255 piocb->iocb_flag &= ~LPFC_IO_WAKE; 9256 9257 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 9258 if (lpfc_readl(phba->HCregaddr, &creg_val)) 9259 return IOCB_ERROR; 9260 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); 9261 writel(creg_val, phba->HCregaddr); 9262 readl(phba->HCregaddr); /* flush */ 9263 } 9264 9265 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb, 9266 SLI_IOCB_RET_IOCB); 9267 if (retval == IOCB_SUCCESS) { 9268 timeout_req = timeout * HZ; 9269 timeleft = wait_event_timeout(done_q, 9270 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE), 9271 timeout_req); 9272 9273 if (piocb->iocb_flag & LPFC_IO_WAKE) { 9274 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 9275 "0331 IOCB wake signaled\n"); 9276 } else if (timeleft == 0) { 9277 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9278 "0338 IOCB wait timeout error - no " 9279 "wake response Data x%x\n", timeout); 9280 retval = IOCB_TIMEDOUT; 9281 } else { 9282 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9283 "0330 IOCB wake NOT set, " 9284 "Data x%x x%lx\n", 9285 timeout, (timeleft / jiffies)); 9286 retval = IOCB_TIMEDOUT; 9287 } 9288 } else if (retval == IOCB_BUSY) { 9289 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 9290 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n", 9291 phba->iocb_cnt, pring->txq_cnt, pring->txcmplq_cnt); 9292 return retval; 9293 } else { 9294 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 9295 "0332 IOCB wait issue failed, Data x%x\n", 9296 retval); 9297 retval = IOCB_ERROR; 9298 } 9299 9300 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 9301 if (lpfc_readl(phba->HCregaddr, &creg_val)) 9302 return IOCB_ERROR; 9303 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING); 9304 writel(creg_val, phba->HCregaddr); 9305 readl(phba->HCregaddr); /* flush */ 9306 } 9307 9308 if (prspiocbq) 9309 piocb->context2 = NULL; 9310 9311 piocb->context_un.wait_queue = NULL; 9312 piocb->iocb_cmpl = NULL; 9313 return retval; 9314 } 9315 9316 /** 9317 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox 9318 * @phba: Pointer to HBA context object. 9319 * @pmboxq: Pointer to driver mailbox object. 9320 * @timeout: Timeout in number of seconds. 9321 * 9322 * This function issues the mailbox to firmware and waits for the 9323 * mailbox command to complete. If the mailbox command is not 9324 * completed within timeout seconds, it returns MBX_TIMEOUT. 9325 * The function waits for the mailbox completion using an 9326 * interruptible wait. If the thread is woken up due to a 9327 * signal, MBX_TIMEOUT error is returned to the caller. Caller 9328 * should not free the mailbox resources, if this function returns 9329 * MBX_TIMEOUT. 9330 * This function will sleep while waiting for mailbox completion. 9331 * So, this function should not be called from any context which 9332 * does not allow sleeping. Due to the same reason, this function 9333 * cannot be called with interrupt disabled. 9334 * This function assumes that the mailbox completion occurs while 9335 * this function sleep. So, this function cannot be called from 9336 * the worker thread which processes mailbox completion. 9337 * This function is called in the context of HBA management 9338 * applications. 9339 * This function returns MBX_SUCCESS when successful. 9340 * This function is called with no lock held. 9341 **/ 9342 int 9343 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq, 9344 uint32_t timeout) 9345 { 9346 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q); 9347 int retval; 9348 unsigned long flag; 9349 9350 /* The caller must leave context1 empty. */ 9351 if (pmboxq->context1) 9352 return MBX_NOT_FINISHED; 9353 9354 pmboxq->mbox_flag &= ~LPFC_MBX_WAKE; 9355 /* setup wake call as IOCB callback */ 9356 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait; 9357 /* setup context field to pass wait_queue pointer to wake function */ 9358 pmboxq->context1 = &done_q; 9359 9360 /* now issue the command */ 9361 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); 9362 9363 if (retval == MBX_BUSY || retval == MBX_SUCCESS) { 9364 wait_event_interruptible_timeout(done_q, 9365 pmboxq->mbox_flag & LPFC_MBX_WAKE, 9366 timeout * HZ); 9367 9368 spin_lock_irqsave(&phba->hbalock, flag); 9369 pmboxq->context1 = NULL; 9370 /* 9371 * if LPFC_MBX_WAKE flag is set the mailbox is completed 9372 * else do not free the resources. 9373 */ 9374 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) { 9375 retval = MBX_SUCCESS; 9376 lpfc_sli4_swap_str(phba, pmboxq); 9377 } else { 9378 retval = MBX_TIMEOUT; 9379 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 9380 } 9381 spin_unlock_irqrestore(&phba->hbalock, flag); 9382 } 9383 9384 return retval; 9385 } 9386 9387 /** 9388 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system 9389 * @phba: Pointer to HBA context. 9390 * 9391 * This function is called to shutdown the driver's mailbox sub-system. 9392 * It first marks the mailbox sub-system is in a block state to prevent 9393 * the asynchronous mailbox command from issued off the pending mailbox 9394 * command queue. If the mailbox command sub-system shutdown is due to 9395 * HBA error conditions such as EEH or ERATT, this routine shall invoke 9396 * the mailbox sub-system flush routine to forcefully bring down the 9397 * mailbox sub-system. Otherwise, if it is due to normal condition (such 9398 * as with offline or HBA function reset), this routine will wait for the 9399 * outstanding mailbox command to complete before invoking the mailbox 9400 * sub-system flush routine to gracefully bring down mailbox sub-system. 9401 **/ 9402 void 9403 lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba) 9404 { 9405 struct lpfc_sli *psli = &phba->sli; 9406 uint8_t actcmd = MBX_HEARTBEAT; 9407 unsigned long timeout; 9408 9409 spin_lock_irq(&phba->hbalock); 9410 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 9411 spin_unlock_irq(&phba->hbalock); 9412 9413 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 9414 spin_lock_irq(&phba->hbalock); 9415 if (phba->sli.mbox_active) 9416 actcmd = phba->sli.mbox_active->u.mb.mbxCommand; 9417 spin_unlock_irq(&phba->hbalock); 9418 /* Determine how long we might wait for the active mailbox 9419 * command to be gracefully completed by firmware. 9420 */ 9421 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) * 9422 1000) + jiffies; 9423 while (phba->sli.mbox_active) { 9424 /* Check active mailbox complete status every 2ms */ 9425 msleep(2); 9426 if (time_after(jiffies, timeout)) 9427 /* Timeout, let the mailbox flush routine to 9428 * forcefully release active mailbox command 9429 */ 9430 break; 9431 } 9432 } 9433 lpfc_sli_mbox_sys_flush(phba); 9434 } 9435 9436 /** 9437 * lpfc_sli_eratt_read - read sli-3 error attention events 9438 * @phba: Pointer to HBA context. 9439 * 9440 * This function is called to read the SLI3 device error attention registers 9441 * for possible error attention events. The caller must hold the hostlock 9442 * with spin_lock_irq(). 9443 * 9444 * This function returns 1 when there is Error Attention in the Host Attention 9445 * Register and returns 0 otherwise. 9446 **/ 9447 static int 9448 lpfc_sli_eratt_read(struct lpfc_hba *phba) 9449 { 9450 uint32_t ha_copy; 9451 9452 /* Read chip Host Attention (HA) register */ 9453 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 9454 goto unplug_err; 9455 9456 if (ha_copy & HA_ERATT) { 9457 /* Read host status register to retrieve error event */ 9458 if (lpfc_sli_read_hs(phba)) 9459 goto unplug_err; 9460 9461 /* Check if there is a deferred error condition is active */ 9462 if ((HS_FFER1 & phba->work_hs) && 9463 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | 9464 HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) { 9465 phba->hba_flag |= DEFER_ERATT; 9466 /* Clear all interrupt enable conditions */ 9467 writel(0, phba->HCregaddr); 9468 readl(phba->HCregaddr); 9469 } 9470 9471 /* Set the driver HA work bitmap */ 9472 phba->work_ha |= HA_ERATT; 9473 /* Indicate polling handles this ERATT */ 9474 phba->hba_flag |= HBA_ERATT_HANDLED; 9475 return 1; 9476 } 9477 return 0; 9478 9479 unplug_err: 9480 /* Set the driver HS work bitmap */ 9481 phba->work_hs |= UNPLUG_ERR; 9482 /* Set the driver HA work bitmap */ 9483 phba->work_ha |= HA_ERATT; 9484 /* Indicate polling handles this ERATT */ 9485 phba->hba_flag |= HBA_ERATT_HANDLED; 9486 return 1; 9487 } 9488 9489 /** 9490 * lpfc_sli4_eratt_read - read sli-4 error attention events 9491 * @phba: Pointer to HBA context. 9492 * 9493 * This function is called to read the SLI4 device error attention registers 9494 * for possible error attention events. The caller must hold the hostlock 9495 * with spin_lock_irq(). 9496 * 9497 * This function returns 1 when there is Error Attention in the Host Attention 9498 * Register and returns 0 otherwise. 9499 **/ 9500 static int 9501 lpfc_sli4_eratt_read(struct lpfc_hba *phba) 9502 { 9503 uint32_t uerr_sta_hi, uerr_sta_lo; 9504 uint32_t if_type, portsmphr; 9505 struct lpfc_register portstat_reg; 9506 9507 /* 9508 * For now, use the SLI4 device internal unrecoverable error 9509 * registers for error attention. This can be changed later. 9510 */ 9511 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 9512 switch (if_type) { 9513 case LPFC_SLI_INTF_IF_TYPE_0: 9514 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr, 9515 &uerr_sta_lo) || 9516 lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr, 9517 &uerr_sta_hi)) { 9518 phba->work_hs |= UNPLUG_ERR; 9519 phba->work_ha |= HA_ERATT; 9520 phba->hba_flag |= HBA_ERATT_HANDLED; 9521 return 1; 9522 } 9523 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) || 9524 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) { 9525 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9526 "1423 HBA Unrecoverable error: " 9527 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " 9528 "ue_mask_lo_reg=0x%x, " 9529 "ue_mask_hi_reg=0x%x\n", 9530 uerr_sta_lo, uerr_sta_hi, 9531 phba->sli4_hba.ue_mask_lo, 9532 phba->sli4_hba.ue_mask_hi); 9533 phba->work_status[0] = uerr_sta_lo; 9534 phba->work_status[1] = uerr_sta_hi; 9535 phba->work_ha |= HA_ERATT; 9536 phba->hba_flag |= HBA_ERATT_HANDLED; 9537 return 1; 9538 } 9539 break; 9540 case LPFC_SLI_INTF_IF_TYPE_2: 9541 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, 9542 &portstat_reg.word0) || 9543 lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 9544 &portsmphr)){ 9545 phba->work_hs |= UNPLUG_ERR; 9546 phba->work_ha |= HA_ERATT; 9547 phba->hba_flag |= HBA_ERATT_HANDLED; 9548 return 1; 9549 } 9550 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) { 9551 phba->work_status[0] = 9552 readl(phba->sli4_hba.u.if_type2.ERR1regaddr); 9553 phba->work_status[1] = 9554 readl(phba->sli4_hba.u.if_type2.ERR2regaddr); 9555 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9556 "2885 Port Error Detected: " 9557 "port status reg 0x%x, " 9558 "port smphr reg 0x%x, " 9559 "error 1=0x%x, error 2=0x%x\n", 9560 portstat_reg.word0, 9561 portsmphr, 9562 phba->work_status[0], 9563 phba->work_status[1]); 9564 phba->work_ha |= HA_ERATT; 9565 phba->hba_flag |= HBA_ERATT_HANDLED; 9566 return 1; 9567 } 9568 break; 9569 case LPFC_SLI_INTF_IF_TYPE_1: 9570 default: 9571 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9572 "2886 HBA Error Attention on unsupported " 9573 "if type %d.", if_type); 9574 return 1; 9575 } 9576 9577 return 0; 9578 } 9579 9580 /** 9581 * lpfc_sli_check_eratt - check error attention events 9582 * @phba: Pointer to HBA context. 9583 * 9584 * This function is called from timer soft interrupt context to check HBA's 9585 * error attention register bit for error attention events. 9586 * 9587 * This function returns 1 when there is Error Attention in the Host Attention 9588 * Register and returns 0 otherwise. 9589 **/ 9590 int 9591 lpfc_sli_check_eratt(struct lpfc_hba *phba) 9592 { 9593 uint32_t ha_copy; 9594 9595 /* If somebody is waiting to handle an eratt, don't process it 9596 * here. The brdkill function will do this. 9597 */ 9598 if (phba->link_flag & LS_IGNORE_ERATT) 9599 return 0; 9600 9601 /* Check if interrupt handler handles this ERATT */ 9602 spin_lock_irq(&phba->hbalock); 9603 if (phba->hba_flag & HBA_ERATT_HANDLED) { 9604 /* Interrupt handler has handled ERATT */ 9605 spin_unlock_irq(&phba->hbalock); 9606 return 0; 9607 } 9608 9609 /* 9610 * If there is deferred error attention, do not check for error 9611 * attention 9612 */ 9613 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 9614 spin_unlock_irq(&phba->hbalock); 9615 return 0; 9616 } 9617 9618 /* If PCI channel is offline, don't process it */ 9619 if (unlikely(pci_channel_offline(phba->pcidev))) { 9620 spin_unlock_irq(&phba->hbalock); 9621 return 0; 9622 } 9623 9624 switch (phba->sli_rev) { 9625 case LPFC_SLI_REV2: 9626 case LPFC_SLI_REV3: 9627 /* Read chip Host Attention (HA) register */ 9628 ha_copy = lpfc_sli_eratt_read(phba); 9629 break; 9630 case LPFC_SLI_REV4: 9631 /* Read device Uncoverable Error (UERR) registers */ 9632 ha_copy = lpfc_sli4_eratt_read(phba); 9633 break; 9634 default: 9635 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9636 "0299 Invalid SLI revision (%d)\n", 9637 phba->sli_rev); 9638 ha_copy = 0; 9639 break; 9640 } 9641 spin_unlock_irq(&phba->hbalock); 9642 9643 return ha_copy; 9644 } 9645 9646 /** 9647 * lpfc_intr_state_check - Check device state for interrupt handling 9648 * @phba: Pointer to HBA context. 9649 * 9650 * This inline routine checks whether a device or its PCI slot is in a state 9651 * that the interrupt should be handled. 9652 * 9653 * This function returns 0 if the device or the PCI slot is in a state that 9654 * interrupt should be handled, otherwise -EIO. 9655 */ 9656 static inline int 9657 lpfc_intr_state_check(struct lpfc_hba *phba) 9658 { 9659 /* If the pci channel is offline, ignore all the interrupts */ 9660 if (unlikely(pci_channel_offline(phba->pcidev))) 9661 return -EIO; 9662 9663 /* Update device level interrupt statistics */ 9664 phba->sli.slistat.sli_intr++; 9665 9666 /* Ignore all interrupts during initialization. */ 9667 if (unlikely(phba->link_state < LPFC_LINK_DOWN)) 9668 return -EIO; 9669 9670 return 0; 9671 } 9672 9673 /** 9674 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device 9675 * @irq: Interrupt number. 9676 * @dev_id: The device context pointer. 9677 * 9678 * This function is directly called from the PCI layer as an interrupt 9679 * service routine when device with SLI-3 interface spec is enabled with 9680 * MSI-X multi-message interrupt mode and there are slow-path events in 9681 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ 9682 * interrupt mode, this function is called as part of the device-level 9683 * interrupt handler. When the PCI slot is in error recovery or the HBA 9684 * is undergoing initialization, the interrupt handler will not process 9685 * the interrupt. The link attention and ELS ring attention events are 9686 * handled by the worker thread. The interrupt handler signals the worker 9687 * thread and returns for these events. This function is called without 9688 * any lock held. It gets the hbalock to access and update SLI data 9689 * structures. 9690 * 9691 * This function returns IRQ_HANDLED when interrupt is handled else it 9692 * returns IRQ_NONE. 9693 **/ 9694 irqreturn_t 9695 lpfc_sli_sp_intr_handler(int irq, void *dev_id) 9696 { 9697 struct lpfc_hba *phba; 9698 uint32_t ha_copy, hc_copy; 9699 uint32_t work_ha_copy; 9700 unsigned long status; 9701 unsigned long iflag; 9702 uint32_t control; 9703 9704 MAILBOX_t *mbox, *pmbox; 9705 struct lpfc_vport *vport; 9706 struct lpfc_nodelist *ndlp; 9707 struct lpfc_dmabuf *mp; 9708 LPFC_MBOXQ_t *pmb; 9709 int rc; 9710 9711 /* 9712 * Get the driver's phba structure from the dev_id and 9713 * assume the HBA is not interrupting. 9714 */ 9715 phba = (struct lpfc_hba *)dev_id; 9716 9717 if (unlikely(!phba)) 9718 return IRQ_NONE; 9719 9720 /* 9721 * Stuff needs to be attented to when this function is invoked as an 9722 * individual interrupt handler in MSI-X multi-message interrupt mode 9723 */ 9724 if (phba->intr_type == MSIX) { 9725 /* Check device state for handling interrupt */ 9726 if (lpfc_intr_state_check(phba)) 9727 return IRQ_NONE; 9728 /* Need to read HA REG for slow-path events */ 9729 spin_lock_irqsave(&phba->hbalock, iflag); 9730 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 9731 goto unplug_error; 9732 /* If somebody is waiting to handle an eratt don't process it 9733 * here. The brdkill function will do this. 9734 */ 9735 if (phba->link_flag & LS_IGNORE_ERATT) 9736 ha_copy &= ~HA_ERATT; 9737 /* Check the need for handling ERATT in interrupt handler */ 9738 if (ha_copy & HA_ERATT) { 9739 if (phba->hba_flag & HBA_ERATT_HANDLED) 9740 /* ERATT polling has handled ERATT */ 9741 ha_copy &= ~HA_ERATT; 9742 else 9743 /* Indicate interrupt handler handles ERATT */ 9744 phba->hba_flag |= HBA_ERATT_HANDLED; 9745 } 9746 9747 /* 9748 * If there is deferred error attention, do not check for any 9749 * interrupt. 9750 */ 9751 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 9752 spin_unlock_irqrestore(&phba->hbalock, iflag); 9753 return IRQ_NONE; 9754 } 9755 9756 /* Clear up only attention source related to slow-path */ 9757 if (lpfc_readl(phba->HCregaddr, &hc_copy)) 9758 goto unplug_error; 9759 9760 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA | 9761 HC_LAINT_ENA | HC_ERINT_ENA), 9762 phba->HCregaddr); 9763 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)), 9764 phba->HAregaddr); 9765 writel(hc_copy, phba->HCregaddr); 9766 readl(phba->HAregaddr); /* flush */ 9767 spin_unlock_irqrestore(&phba->hbalock, iflag); 9768 } else 9769 ha_copy = phba->ha_copy; 9770 9771 work_ha_copy = ha_copy & phba->work_ha_mask; 9772 9773 if (work_ha_copy) { 9774 if (work_ha_copy & HA_LATT) { 9775 if (phba->sli.sli_flag & LPFC_PROCESS_LA) { 9776 /* 9777 * Turn off Link Attention interrupts 9778 * until CLEAR_LA done 9779 */ 9780 spin_lock_irqsave(&phba->hbalock, iflag); 9781 phba->sli.sli_flag &= ~LPFC_PROCESS_LA; 9782 if (lpfc_readl(phba->HCregaddr, &control)) 9783 goto unplug_error; 9784 control &= ~HC_LAINT_ENA; 9785 writel(control, phba->HCregaddr); 9786 readl(phba->HCregaddr); /* flush */ 9787 spin_unlock_irqrestore(&phba->hbalock, iflag); 9788 } 9789 else 9790 work_ha_copy &= ~HA_LATT; 9791 } 9792 9793 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) { 9794 /* 9795 * Turn off Slow Rings interrupts, LPFC_ELS_RING is 9796 * the only slow ring. 9797 */ 9798 status = (work_ha_copy & 9799 (HA_RXMASK << (4*LPFC_ELS_RING))); 9800 status >>= (4*LPFC_ELS_RING); 9801 if (status & HA_RXMASK) { 9802 spin_lock_irqsave(&phba->hbalock, iflag); 9803 if (lpfc_readl(phba->HCregaddr, &control)) 9804 goto unplug_error; 9805 9806 lpfc_debugfs_slow_ring_trc(phba, 9807 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x", 9808 control, status, 9809 (uint32_t)phba->sli.slistat.sli_intr); 9810 9811 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) { 9812 lpfc_debugfs_slow_ring_trc(phba, 9813 "ISR Disable ring:" 9814 "pwork:x%x hawork:x%x wait:x%x", 9815 phba->work_ha, work_ha_copy, 9816 (uint32_t)((unsigned long) 9817 &phba->work_waitq)); 9818 9819 control &= 9820 ~(HC_R0INT_ENA << LPFC_ELS_RING); 9821 writel(control, phba->HCregaddr); 9822 readl(phba->HCregaddr); /* flush */ 9823 } 9824 else { 9825 lpfc_debugfs_slow_ring_trc(phba, 9826 "ISR slow ring: pwork:" 9827 "x%x hawork:x%x wait:x%x", 9828 phba->work_ha, work_ha_copy, 9829 (uint32_t)((unsigned long) 9830 &phba->work_waitq)); 9831 } 9832 spin_unlock_irqrestore(&phba->hbalock, iflag); 9833 } 9834 } 9835 spin_lock_irqsave(&phba->hbalock, iflag); 9836 if (work_ha_copy & HA_ERATT) { 9837 if (lpfc_sli_read_hs(phba)) 9838 goto unplug_error; 9839 /* 9840 * Check if there is a deferred error condition 9841 * is active 9842 */ 9843 if ((HS_FFER1 & phba->work_hs) && 9844 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | 9845 HS_FFER6 | HS_FFER7 | HS_FFER8) & 9846 phba->work_hs)) { 9847 phba->hba_flag |= DEFER_ERATT; 9848 /* Clear all interrupt enable conditions */ 9849 writel(0, phba->HCregaddr); 9850 readl(phba->HCregaddr); 9851 } 9852 } 9853 9854 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) { 9855 pmb = phba->sli.mbox_active; 9856 pmbox = &pmb->u.mb; 9857 mbox = phba->mbox; 9858 vport = pmb->vport; 9859 9860 /* First check out the status word */ 9861 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t)); 9862 if (pmbox->mbxOwner != OWN_HOST) { 9863 spin_unlock_irqrestore(&phba->hbalock, iflag); 9864 /* 9865 * Stray Mailbox Interrupt, mbxCommand <cmd> 9866 * mbxStatus <status> 9867 */ 9868 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 9869 LOG_SLI, 9870 "(%d):0304 Stray Mailbox " 9871 "Interrupt mbxCommand x%x " 9872 "mbxStatus x%x\n", 9873 (vport ? vport->vpi : 0), 9874 pmbox->mbxCommand, 9875 pmbox->mbxStatus); 9876 /* clear mailbox attention bit */ 9877 work_ha_copy &= ~HA_MBATT; 9878 } else { 9879 phba->sli.mbox_active = NULL; 9880 spin_unlock_irqrestore(&phba->hbalock, iflag); 9881 phba->last_completion_time = jiffies; 9882 del_timer(&phba->sli.mbox_tmo); 9883 if (pmb->mbox_cmpl) { 9884 lpfc_sli_pcimem_bcopy(mbox, pmbox, 9885 MAILBOX_CMD_SIZE); 9886 if (pmb->out_ext_byte_len && 9887 pmb->context2) 9888 lpfc_sli_pcimem_bcopy( 9889 phba->mbox_ext, 9890 pmb->context2, 9891 pmb->out_ext_byte_len); 9892 } 9893 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) { 9894 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG; 9895 9896 lpfc_debugfs_disc_trc(vport, 9897 LPFC_DISC_TRC_MBOX_VPORT, 9898 "MBOX dflt rpi: : " 9899 "status:x%x rpi:x%x", 9900 (uint32_t)pmbox->mbxStatus, 9901 pmbox->un.varWords[0], 0); 9902 9903 if (!pmbox->mbxStatus) { 9904 mp = (struct lpfc_dmabuf *) 9905 (pmb->context1); 9906 ndlp = (struct lpfc_nodelist *) 9907 pmb->context2; 9908 9909 /* Reg_LOGIN of dflt RPI was 9910 * successful. new lets get 9911 * rid of the RPI using the 9912 * same mbox buffer. 9913 */ 9914 lpfc_unreg_login(phba, 9915 vport->vpi, 9916 pmbox->un.varWords[0], 9917 pmb); 9918 pmb->mbox_cmpl = 9919 lpfc_mbx_cmpl_dflt_rpi; 9920 pmb->context1 = mp; 9921 pmb->context2 = ndlp; 9922 pmb->vport = vport; 9923 rc = lpfc_sli_issue_mbox(phba, 9924 pmb, 9925 MBX_NOWAIT); 9926 if (rc != MBX_BUSY) 9927 lpfc_printf_log(phba, 9928 KERN_ERR, 9929 LOG_MBOX | LOG_SLI, 9930 "0350 rc should have" 9931 "been MBX_BUSY\n"); 9932 if (rc != MBX_NOT_FINISHED) 9933 goto send_current_mbox; 9934 } 9935 } 9936 spin_lock_irqsave( 9937 &phba->pport->work_port_lock, 9938 iflag); 9939 phba->pport->work_port_events &= 9940 ~WORKER_MBOX_TMO; 9941 spin_unlock_irqrestore( 9942 &phba->pport->work_port_lock, 9943 iflag); 9944 lpfc_mbox_cmpl_put(phba, pmb); 9945 } 9946 } else 9947 spin_unlock_irqrestore(&phba->hbalock, iflag); 9948 9949 if ((work_ha_copy & HA_MBATT) && 9950 (phba->sli.mbox_active == NULL)) { 9951 send_current_mbox: 9952 /* Process next mailbox command if there is one */ 9953 do { 9954 rc = lpfc_sli_issue_mbox(phba, NULL, 9955 MBX_NOWAIT); 9956 } while (rc == MBX_NOT_FINISHED); 9957 if (rc != MBX_SUCCESS) 9958 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 9959 LOG_SLI, "0349 rc should be " 9960 "MBX_SUCCESS\n"); 9961 } 9962 9963 spin_lock_irqsave(&phba->hbalock, iflag); 9964 phba->work_ha |= work_ha_copy; 9965 spin_unlock_irqrestore(&phba->hbalock, iflag); 9966 lpfc_worker_wake_up(phba); 9967 } 9968 return IRQ_HANDLED; 9969 unplug_error: 9970 spin_unlock_irqrestore(&phba->hbalock, iflag); 9971 return IRQ_HANDLED; 9972 9973 } /* lpfc_sli_sp_intr_handler */ 9974 9975 /** 9976 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device. 9977 * @irq: Interrupt number. 9978 * @dev_id: The device context pointer. 9979 * 9980 * This function is directly called from the PCI layer as an interrupt 9981 * service routine when device with SLI-3 interface spec is enabled with 9982 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB 9983 * ring event in the HBA. However, when the device is enabled with either 9984 * MSI or Pin-IRQ interrupt mode, this function is called as part of the 9985 * device-level interrupt handler. When the PCI slot is in error recovery 9986 * or the HBA is undergoing initialization, the interrupt handler will not 9987 * process the interrupt. The SCSI FCP fast-path ring event are handled in 9988 * the intrrupt context. This function is called without any lock held. 9989 * It gets the hbalock to access and update SLI data structures. 9990 * 9991 * This function returns IRQ_HANDLED when interrupt is handled else it 9992 * returns IRQ_NONE. 9993 **/ 9994 irqreturn_t 9995 lpfc_sli_fp_intr_handler(int irq, void *dev_id) 9996 { 9997 struct lpfc_hba *phba; 9998 uint32_t ha_copy; 9999 unsigned long status; 10000 unsigned long iflag; 10001 10002 /* Get the driver's phba structure from the dev_id and 10003 * assume the HBA is not interrupting. 10004 */ 10005 phba = (struct lpfc_hba *) dev_id; 10006 10007 if (unlikely(!phba)) 10008 return IRQ_NONE; 10009 10010 /* 10011 * Stuff needs to be attented to when this function is invoked as an 10012 * individual interrupt handler in MSI-X multi-message interrupt mode 10013 */ 10014 if (phba->intr_type == MSIX) { 10015 /* Check device state for handling interrupt */ 10016 if (lpfc_intr_state_check(phba)) 10017 return IRQ_NONE; 10018 /* Need to read HA REG for FCP ring and other ring events */ 10019 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 10020 return IRQ_HANDLED; 10021 /* Clear up only attention source related to fast-path */ 10022 spin_lock_irqsave(&phba->hbalock, iflag); 10023 /* 10024 * If there is deferred error attention, do not check for 10025 * any interrupt. 10026 */ 10027 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 10028 spin_unlock_irqrestore(&phba->hbalock, iflag); 10029 return IRQ_NONE; 10030 } 10031 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)), 10032 phba->HAregaddr); 10033 readl(phba->HAregaddr); /* flush */ 10034 spin_unlock_irqrestore(&phba->hbalock, iflag); 10035 } else 10036 ha_copy = phba->ha_copy; 10037 10038 /* 10039 * Process all events on FCP ring. Take the optimized path for FCP IO. 10040 */ 10041 ha_copy &= ~(phba->work_ha_mask); 10042 10043 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING))); 10044 status >>= (4*LPFC_FCP_RING); 10045 if (status & HA_RXMASK) 10046 lpfc_sli_handle_fast_ring_event(phba, 10047 &phba->sli.ring[LPFC_FCP_RING], 10048 status); 10049 10050 if (phba->cfg_multi_ring_support == 2) { 10051 /* 10052 * Process all events on extra ring. Take the optimized path 10053 * for extra ring IO. 10054 */ 10055 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING))); 10056 status >>= (4*LPFC_EXTRA_RING); 10057 if (status & HA_RXMASK) { 10058 lpfc_sli_handle_fast_ring_event(phba, 10059 &phba->sli.ring[LPFC_EXTRA_RING], 10060 status); 10061 } 10062 } 10063 return IRQ_HANDLED; 10064 } /* lpfc_sli_fp_intr_handler */ 10065 10066 /** 10067 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device 10068 * @irq: Interrupt number. 10069 * @dev_id: The device context pointer. 10070 * 10071 * This function is the HBA device-level interrupt handler to device with 10072 * SLI-3 interface spec, called from the PCI layer when either MSI or 10073 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which 10074 * requires driver attention. This function invokes the slow-path interrupt 10075 * attention handling function and fast-path interrupt attention handling 10076 * function in turn to process the relevant HBA attention events. This 10077 * function is called without any lock held. It gets the hbalock to access 10078 * and update SLI data structures. 10079 * 10080 * This function returns IRQ_HANDLED when interrupt is handled, else it 10081 * returns IRQ_NONE. 10082 **/ 10083 irqreturn_t 10084 lpfc_sli_intr_handler(int irq, void *dev_id) 10085 { 10086 struct lpfc_hba *phba; 10087 irqreturn_t sp_irq_rc, fp_irq_rc; 10088 unsigned long status1, status2; 10089 uint32_t hc_copy; 10090 10091 /* 10092 * Get the driver's phba structure from the dev_id and 10093 * assume the HBA is not interrupting. 10094 */ 10095 phba = (struct lpfc_hba *) dev_id; 10096 10097 if (unlikely(!phba)) 10098 return IRQ_NONE; 10099 10100 /* Check device state for handling interrupt */ 10101 if (lpfc_intr_state_check(phba)) 10102 return IRQ_NONE; 10103 10104 spin_lock(&phba->hbalock); 10105 if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) { 10106 spin_unlock(&phba->hbalock); 10107 return IRQ_HANDLED; 10108 } 10109 10110 if (unlikely(!phba->ha_copy)) { 10111 spin_unlock(&phba->hbalock); 10112 return IRQ_NONE; 10113 } else if (phba->ha_copy & HA_ERATT) { 10114 if (phba->hba_flag & HBA_ERATT_HANDLED) 10115 /* ERATT polling has handled ERATT */ 10116 phba->ha_copy &= ~HA_ERATT; 10117 else 10118 /* Indicate interrupt handler handles ERATT */ 10119 phba->hba_flag |= HBA_ERATT_HANDLED; 10120 } 10121 10122 /* 10123 * If there is deferred error attention, do not check for any interrupt. 10124 */ 10125 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 10126 spin_unlock(&phba->hbalock); 10127 return IRQ_NONE; 10128 } 10129 10130 /* Clear attention sources except link and error attentions */ 10131 if (lpfc_readl(phba->HCregaddr, &hc_copy)) { 10132 spin_unlock(&phba->hbalock); 10133 return IRQ_HANDLED; 10134 } 10135 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA 10136 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA), 10137 phba->HCregaddr); 10138 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr); 10139 writel(hc_copy, phba->HCregaddr); 10140 readl(phba->HAregaddr); /* flush */ 10141 spin_unlock(&phba->hbalock); 10142 10143 /* 10144 * Invokes slow-path host attention interrupt handling as appropriate. 10145 */ 10146 10147 /* status of events with mailbox and link attention */ 10148 status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT); 10149 10150 /* status of events with ELS ring */ 10151 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING))); 10152 status2 >>= (4*LPFC_ELS_RING); 10153 10154 if (status1 || (status2 & HA_RXMASK)) 10155 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id); 10156 else 10157 sp_irq_rc = IRQ_NONE; 10158 10159 /* 10160 * Invoke fast-path host attention interrupt handling as appropriate. 10161 */ 10162 10163 /* status of events with FCP ring */ 10164 status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING))); 10165 status1 >>= (4*LPFC_FCP_RING); 10166 10167 /* status of events with extra ring */ 10168 if (phba->cfg_multi_ring_support == 2) { 10169 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING))); 10170 status2 >>= (4*LPFC_EXTRA_RING); 10171 } else 10172 status2 = 0; 10173 10174 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK)) 10175 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id); 10176 else 10177 fp_irq_rc = IRQ_NONE; 10178 10179 /* Return device-level interrupt handling status */ 10180 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc; 10181 } /* lpfc_sli_intr_handler */ 10182 10183 /** 10184 * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event 10185 * @phba: pointer to lpfc hba data structure. 10186 * 10187 * This routine is invoked by the worker thread to process all the pending 10188 * SLI4 FCP abort XRI events. 10189 **/ 10190 void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba) 10191 { 10192 struct lpfc_cq_event *cq_event; 10193 10194 /* First, declare the fcp xri abort event has been handled */ 10195 spin_lock_irq(&phba->hbalock); 10196 phba->hba_flag &= ~FCP_XRI_ABORT_EVENT; 10197 spin_unlock_irq(&phba->hbalock); 10198 /* Now, handle all the fcp xri abort events */ 10199 while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) { 10200 /* Get the first event from the head of the event queue */ 10201 spin_lock_irq(&phba->hbalock); 10202 list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue, 10203 cq_event, struct lpfc_cq_event, list); 10204 spin_unlock_irq(&phba->hbalock); 10205 /* Notify aborted XRI for FCP work queue */ 10206 lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri); 10207 /* Free the event processed back to the free pool */ 10208 lpfc_sli4_cq_event_release(phba, cq_event); 10209 } 10210 } 10211 10212 /** 10213 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event 10214 * @phba: pointer to lpfc hba data structure. 10215 * 10216 * This routine is invoked by the worker thread to process all the pending 10217 * SLI4 els abort xri events. 10218 **/ 10219 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba) 10220 { 10221 struct lpfc_cq_event *cq_event; 10222 10223 /* First, declare the els xri abort event has been handled */ 10224 spin_lock_irq(&phba->hbalock); 10225 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT; 10226 spin_unlock_irq(&phba->hbalock); 10227 /* Now, handle all the els xri abort events */ 10228 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) { 10229 /* Get the first event from the head of the event queue */ 10230 spin_lock_irq(&phba->hbalock); 10231 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue, 10232 cq_event, struct lpfc_cq_event, list); 10233 spin_unlock_irq(&phba->hbalock); 10234 /* Notify aborted XRI for ELS work queue */ 10235 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri); 10236 /* Free the event processed back to the free pool */ 10237 lpfc_sli4_cq_event_release(phba, cq_event); 10238 } 10239 } 10240 10241 /** 10242 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn 10243 * @phba: pointer to lpfc hba data structure 10244 * @pIocbIn: pointer to the rspiocbq 10245 * @pIocbOut: pointer to the cmdiocbq 10246 * @wcqe: pointer to the complete wcqe 10247 * 10248 * This routine transfers the fields of a command iocbq to a response iocbq 10249 * by copying all the IOCB fields from command iocbq and transferring the 10250 * completion status information from the complete wcqe. 10251 **/ 10252 static void 10253 lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba, 10254 struct lpfc_iocbq *pIocbIn, 10255 struct lpfc_iocbq *pIocbOut, 10256 struct lpfc_wcqe_complete *wcqe) 10257 { 10258 unsigned long iflags; 10259 size_t offset = offsetof(struct lpfc_iocbq, iocb); 10260 10261 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset, 10262 sizeof(struct lpfc_iocbq) - offset); 10263 /* Map WCQE parameters into irspiocb parameters */ 10264 pIocbIn->iocb.ulpStatus = bf_get(lpfc_wcqe_c_status, wcqe); 10265 if (pIocbOut->iocb_flag & LPFC_IO_FCP) 10266 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR) 10267 pIocbIn->iocb.un.fcpi.fcpi_parm = 10268 pIocbOut->iocb.un.fcpi.fcpi_parm - 10269 wcqe->total_data_placed; 10270 else 10271 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; 10272 else { 10273 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; 10274 pIocbIn->iocb.un.genreq64.bdl.bdeSize = wcqe->total_data_placed; 10275 } 10276 10277 /* Pick up HBA exchange busy condition */ 10278 if (bf_get(lpfc_wcqe_c_xb, wcqe)) { 10279 spin_lock_irqsave(&phba->hbalock, iflags); 10280 pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY; 10281 spin_unlock_irqrestore(&phba->hbalock, iflags); 10282 } 10283 } 10284 10285 /** 10286 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe 10287 * @phba: Pointer to HBA context object. 10288 * @wcqe: Pointer to work-queue completion queue entry. 10289 * 10290 * This routine handles an ELS work-queue completion event and construct 10291 * a pseudo response ELS IODBQ from the SLI4 ELS WCQE for the common 10292 * discovery engine to handle. 10293 * 10294 * Return: Pointer to the receive IOCBQ, NULL otherwise. 10295 **/ 10296 static struct lpfc_iocbq * 10297 lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba, 10298 struct lpfc_iocbq *irspiocbq) 10299 { 10300 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 10301 struct lpfc_iocbq *cmdiocbq; 10302 struct lpfc_wcqe_complete *wcqe; 10303 unsigned long iflags; 10304 10305 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl; 10306 spin_lock_irqsave(&phba->hbalock, iflags); 10307 pring->stats.iocb_event++; 10308 /* Look up the ELS command IOCB and create pseudo response IOCB */ 10309 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, 10310 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 10311 spin_unlock_irqrestore(&phba->hbalock, iflags); 10312 10313 if (unlikely(!cmdiocbq)) { 10314 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 10315 "0386 ELS complete with no corresponding " 10316 "cmdiocb: iotag (%d)\n", 10317 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 10318 lpfc_sli_release_iocbq(phba, irspiocbq); 10319 return NULL; 10320 } 10321 10322 /* Fake the irspiocbq and copy necessary response information */ 10323 lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe); 10324 10325 return irspiocbq; 10326 } 10327 10328 /** 10329 * lpfc_sli4_sp_handle_async_event - Handle an asynchroous event 10330 * @phba: Pointer to HBA context object. 10331 * @cqe: Pointer to mailbox completion queue entry. 10332 * 10333 * This routine process a mailbox completion queue entry with asynchrous 10334 * event. 10335 * 10336 * Return: true if work posted to worker thread, otherwise false. 10337 **/ 10338 static bool 10339 lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe) 10340 { 10341 struct lpfc_cq_event *cq_event; 10342 unsigned long iflags; 10343 10344 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 10345 "0392 Async Event: word0:x%x, word1:x%x, " 10346 "word2:x%x, word3:x%x\n", mcqe->word0, 10347 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer); 10348 10349 /* Allocate a new internal CQ_EVENT entry */ 10350 cq_event = lpfc_sli4_cq_event_alloc(phba); 10351 if (!cq_event) { 10352 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 10353 "0394 Failed to allocate CQ_EVENT entry\n"); 10354 return false; 10355 } 10356 10357 /* Move the CQE into an asynchronous event entry */ 10358 memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe)); 10359 spin_lock_irqsave(&phba->hbalock, iflags); 10360 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue); 10361 /* Set the async event flag */ 10362 phba->hba_flag |= ASYNC_EVENT; 10363 spin_unlock_irqrestore(&phba->hbalock, iflags); 10364 10365 return true; 10366 } 10367 10368 /** 10369 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event 10370 * @phba: Pointer to HBA context object. 10371 * @cqe: Pointer to mailbox completion queue entry. 10372 * 10373 * This routine process a mailbox completion queue entry with mailbox 10374 * completion event. 10375 * 10376 * Return: true if work posted to worker thread, otherwise false. 10377 **/ 10378 static bool 10379 lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe) 10380 { 10381 uint32_t mcqe_status; 10382 MAILBOX_t *mbox, *pmbox; 10383 struct lpfc_mqe *mqe; 10384 struct lpfc_vport *vport; 10385 struct lpfc_nodelist *ndlp; 10386 struct lpfc_dmabuf *mp; 10387 unsigned long iflags; 10388 LPFC_MBOXQ_t *pmb; 10389 bool workposted = false; 10390 int rc; 10391 10392 /* If not a mailbox complete MCQE, out by checking mailbox consume */ 10393 if (!bf_get(lpfc_trailer_completed, mcqe)) 10394 goto out_no_mqe_complete; 10395 10396 /* Get the reference to the active mbox command */ 10397 spin_lock_irqsave(&phba->hbalock, iflags); 10398 pmb = phba->sli.mbox_active; 10399 if (unlikely(!pmb)) { 10400 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 10401 "1832 No pending MBOX command to handle\n"); 10402 spin_unlock_irqrestore(&phba->hbalock, iflags); 10403 goto out_no_mqe_complete; 10404 } 10405 spin_unlock_irqrestore(&phba->hbalock, iflags); 10406 mqe = &pmb->u.mqe; 10407 pmbox = (MAILBOX_t *)&pmb->u.mqe; 10408 mbox = phba->mbox; 10409 vport = pmb->vport; 10410 10411 /* Reset heartbeat timer */ 10412 phba->last_completion_time = jiffies; 10413 del_timer(&phba->sli.mbox_tmo); 10414 10415 /* Move mbox data to caller's mailbox region, do endian swapping */ 10416 if (pmb->mbox_cmpl && mbox) 10417 lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe)); 10418 /* Set the mailbox status with SLI4 range 0x4000 */ 10419 mcqe_status = bf_get(lpfc_mcqe_status, mcqe); 10420 if (mcqe_status != MB_CQE_STATUS_SUCCESS) 10421 bf_set(lpfc_mqe_status, mqe, 10422 (LPFC_MBX_ERROR_RANGE | mcqe_status)); 10423 10424 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) { 10425 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG; 10426 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT, 10427 "MBOX dflt rpi: status:x%x rpi:x%x", 10428 mcqe_status, 10429 pmbox->un.varWords[0], 0); 10430 if (mcqe_status == MB_CQE_STATUS_SUCCESS) { 10431 mp = (struct lpfc_dmabuf *)(pmb->context1); 10432 ndlp = (struct lpfc_nodelist *)pmb->context2; 10433 /* Reg_LOGIN of dflt RPI was successful. Now lets get 10434 * RID of the PPI using the same mbox buffer. 10435 */ 10436 lpfc_unreg_login(phba, vport->vpi, 10437 pmbox->un.varWords[0], pmb); 10438 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; 10439 pmb->context1 = mp; 10440 pmb->context2 = ndlp; 10441 pmb->vport = vport; 10442 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 10443 if (rc != MBX_BUSY) 10444 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 10445 LOG_SLI, "0385 rc should " 10446 "have been MBX_BUSY\n"); 10447 if (rc != MBX_NOT_FINISHED) 10448 goto send_current_mbox; 10449 } 10450 } 10451 spin_lock_irqsave(&phba->pport->work_port_lock, iflags); 10452 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 10453 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags); 10454 10455 /* There is mailbox completion work to do */ 10456 spin_lock_irqsave(&phba->hbalock, iflags); 10457 __lpfc_mbox_cmpl_put(phba, pmb); 10458 phba->work_ha |= HA_MBATT; 10459 spin_unlock_irqrestore(&phba->hbalock, iflags); 10460 workposted = true; 10461 10462 send_current_mbox: 10463 spin_lock_irqsave(&phba->hbalock, iflags); 10464 /* Release the mailbox command posting token */ 10465 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 10466 /* Setting active mailbox pointer need to be in sync to flag clear */ 10467 phba->sli.mbox_active = NULL; 10468 spin_unlock_irqrestore(&phba->hbalock, iflags); 10469 /* Wake up worker thread to post the next pending mailbox command */ 10470 lpfc_worker_wake_up(phba); 10471 out_no_mqe_complete: 10472 if (bf_get(lpfc_trailer_consumed, mcqe)) 10473 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq); 10474 return workposted; 10475 } 10476 10477 /** 10478 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry 10479 * @phba: Pointer to HBA context object. 10480 * @cqe: Pointer to mailbox completion queue entry. 10481 * 10482 * This routine process a mailbox completion queue entry, it invokes the 10483 * proper mailbox complete handling or asynchrous event handling routine 10484 * according to the MCQE's async bit. 10485 * 10486 * Return: true if work posted to worker thread, otherwise false. 10487 **/ 10488 static bool 10489 lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe) 10490 { 10491 struct lpfc_mcqe mcqe; 10492 bool workposted; 10493 10494 /* Copy the mailbox MCQE and convert endian order as needed */ 10495 lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe)); 10496 10497 /* Invoke the proper event handling routine */ 10498 if (!bf_get(lpfc_trailer_async, &mcqe)) 10499 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe); 10500 else 10501 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe); 10502 return workposted; 10503 } 10504 10505 /** 10506 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event 10507 * @phba: Pointer to HBA context object. 10508 * @wcqe: Pointer to work-queue completion queue entry. 10509 * 10510 * This routine handles an ELS work-queue completion event. 10511 * 10512 * Return: true if work posted to worker thread, otherwise false. 10513 **/ 10514 static bool 10515 lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, 10516 struct lpfc_wcqe_complete *wcqe) 10517 { 10518 struct lpfc_iocbq *irspiocbq; 10519 unsigned long iflags; 10520 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING]; 10521 10522 /* Get an irspiocbq for later ELS response processing use */ 10523 irspiocbq = lpfc_sli_get_iocbq(phba); 10524 if (!irspiocbq) { 10525 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 10526 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d " 10527 "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n", 10528 pring->txq_cnt, phba->iocb_cnt, 10529 phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt, 10530 phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt); 10531 return false; 10532 } 10533 10534 /* Save off the slow-path queue event for work thread to process */ 10535 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe)); 10536 spin_lock_irqsave(&phba->hbalock, iflags); 10537 list_add_tail(&irspiocbq->cq_event.list, 10538 &phba->sli4_hba.sp_queue_event); 10539 phba->hba_flag |= HBA_SP_QUEUE_EVT; 10540 spin_unlock_irqrestore(&phba->hbalock, iflags); 10541 10542 return true; 10543 } 10544 10545 /** 10546 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event 10547 * @phba: Pointer to HBA context object. 10548 * @wcqe: Pointer to work-queue completion queue entry. 10549 * 10550 * This routine handles slow-path WQ entry comsumed event by invoking the 10551 * proper WQ release routine to the slow-path WQ. 10552 **/ 10553 static void 10554 lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba, 10555 struct lpfc_wcqe_release *wcqe) 10556 { 10557 /* Check for the slow-path ELS work queue */ 10558 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id) 10559 lpfc_sli4_wq_release(phba->sli4_hba.els_wq, 10560 bf_get(lpfc_wcqe_r_wqe_index, wcqe)); 10561 else 10562 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 10563 "2579 Slow-path wqe consume event carries " 10564 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n", 10565 bf_get(lpfc_wcqe_r_wqe_index, wcqe), 10566 phba->sli4_hba.els_wq->queue_id); 10567 } 10568 10569 /** 10570 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event 10571 * @phba: Pointer to HBA context object. 10572 * @cq: Pointer to a WQ completion queue. 10573 * @wcqe: Pointer to work-queue completion queue entry. 10574 * 10575 * This routine handles an XRI abort event. 10576 * 10577 * Return: true if work posted to worker thread, otherwise false. 10578 **/ 10579 static bool 10580 lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba, 10581 struct lpfc_queue *cq, 10582 struct sli4_wcqe_xri_aborted *wcqe) 10583 { 10584 bool workposted = false; 10585 struct lpfc_cq_event *cq_event; 10586 unsigned long iflags; 10587 10588 /* Allocate a new internal CQ_EVENT entry */ 10589 cq_event = lpfc_sli4_cq_event_alloc(phba); 10590 if (!cq_event) { 10591 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 10592 "0602 Failed to allocate CQ_EVENT entry\n"); 10593 return false; 10594 } 10595 10596 /* Move the CQE into the proper xri abort event list */ 10597 memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted)); 10598 switch (cq->subtype) { 10599 case LPFC_FCP: 10600 spin_lock_irqsave(&phba->hbalock, iflags); 10601 list_add_tail(&cq_event->list, 10602 &phba->sli4_hba.sp_fcp_xri_aborted_work_queue); 10603 /* Set the fcp xri abort event flag */ 10604 phba->hba_flag |= FCP_XRI_ABORT_EVENT; 10605 spin_unlock_irqrestore(&phba->hbalock, iflags); 10606 workposted = true; 10607 break; 10608 case LPFC_ELS: 10609 spin_lock_irqsave(&phba->hbalock, iflags); 10610 list_add_tail(&cq_event->list, 10611 &phba->sli4_hba.sp_els_xri_aborted_work_queue); 10612 /* Set the els xri abort event flag */ 10613 phba->hba_flag |= ELS_XRI_ABORT_EVENT; 10614 spin_unlock_irqrestore(&phba->hbalock, iflags); 10615 workposted = true; 10616 break; 10617 default: 10618 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 10619 "0603 Invalid work queue CQE subtype (x%x)\n", 10620 cq->subtype); 10621 workposted = false; 10622 break; 10623 } 10624 return workposted; 10625 } 10626 10627 /** 10628 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry 10629 * @phba: Pointer to HBA context object. 10630 * @rcqe: Pointer to receive-queue completion queue entry. 10631 * 10632 * This routine process a receive-queue completion queue entry. 10633 * 10634 * Return: true if work posted to worker thread, otherwise false. 10635 **/ 10636 static bool 10637 lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) 10638 { 10639 bool workposted = false; 10640 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq; 10641 struct lpfc_queue *drq = phba->sli4_hba.dat_rq; 10642 struct hbq_dmabuf *dma_buf; 10643 uint32_t status, rq_id; 10644 unsigned long iflags; 10645 10646 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1) 10647 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe); 10648 else 10649 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe); 10650 if (rq_id != hrq->queue_id) 10651 goto out; 10652 10653 status = bf_get(lpfc_rcqe_status, rcqe); 10654 switch (status) { 10655 case FC_STATUS_RQ_BUF_LEN_EXCEEDED: 10656 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 10657 "2537 Receive Frame Truncated!!\n"); 10658 case FC_STATUS_RQ_SUCCESS: 10659 lpfc_sli4_rq_release(hrq, drq); 10660 spin_lock_irqsave(&phba->hbalock, iflags); 10661 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list); 10662 if (!dma_buf) { 10663 spin_unlock_irqrestore(&phba->hbalock, iflags); 10664 goto out; 10665 } 10666 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe)); 10667 /* save off the frame for the word thread to process */ 10668 list_add_tail(&dma_buf->cq_event.list, 10669 &phba->sli4_hba.sp_queue_event); 10670 /* Frame received */ 10671 phba->hba_flag |= HBA_SP_QUEUE_EVT; 10672 spin_unlock_irqrestore(&phba->hbalock, iflags); 10673 workposted = true; 10674 break; 10675 case FC_STATUS_INSUFF_BUF_NEED_BUF: 10676 case FC_STATUS_INSUFF_BUF_FRM_DISC: 10677 /* Post more buffers if possible */ 10678 spin_lock_irqsave(&phba->hbalock, iflags); 10679 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER; 10680 spin_unlock_irqrestore(&phba->hbalock, iflags); 10681 workposted = true; 10682 break; 10683 } 10684 out: 10685 return workposted; 10686 } 10687 10688 /** 10689 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry 10690 * @phba: Pointer to HBA context object. 10691 * @cq: Pointer to the completion queue. 10692 * @wcqe: Pointer to a completion queue entry. 10693 * 10694 * This routine process a slow-path work-queue or receive queue completion queue 10695 * entry. 10696 * 10697 * Return: true if work posted to worker thread, otherwise false. 10698 **/ 10699 static bool 10700 lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 10701 struct lpfc_cqe *cqe) 10702 { 10703 struct lpfc_cqe cqevt; 10704 bool workposted = false; 10705 10706 /* Copy the work queue CQE and convert endian order if needed */ 10707 lpfc_sli_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe)); 10708 10709 /* Check and process for different type of WCQE and dispatch */ 10710 switch (bf_get(lpfc_cqe_code, &cqevt)) { 10711 case CQE_CODE_COMPL_WQE: 10712 /* Process the WQ/RQ complete event */ 10713 phba->last_completion_time = jiffies; 10714 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, 10715 (struct lpfc_wcqe_complete *)&cqevt); 10716 break; 10717 case CQE_CODE_RELEASE_WQE: 10718 /* Process the WQ release event */ 10719 lpfc_sli4_sp_handle_rel_wcqe(phba, 10720 (struct lpfc_wcqe_release *)&cqevt); 10721 break; 10722 case CQE_CODE_XRI_ABORTED: 10723 /* Process the WQ XRI abort event */ 10724 phba->last_completion_time = jiffies; 10725 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq, 10726 (struct sli4_wcqe_xri_aborted *)&cqevt); 10727 break; 10728 case CQE_CODE_RECEIVE: 10729 case CQE_CODE_RECEIVE_V1: 10730 /* Process the RQ event */ 10731 phba->last_completion_time = jiffies; 10732 workposted = lpfc_sli4_sp_handle_rcqe(phba, 10733 (struct lpfc_rcqe *)&cqevt); 10734 break; 10735 default: 10736 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 10737 "0388 Not a valid WCQE code: x%x\n", 10738 bf_get(lpfc_cqe_code, &cqevt)); 10739 break; 10740 } 10741 return workposted; 10742 } 10743 10744 /** 10745 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry 10746 * @phba: Pointer to HBA context object. 10747 * @eqe: Pointer to fast-path event queue entry. 10748 * 10749 * This routine process a event queue entry from the slow-path event queue. 10750 * It will check the MajorCode and MinorCode to determine this is for a 10751 * completion event on a completion queue, if not, an error shall be logged 10752 * and just return. Otherwise, it will get to the corresponding completion 10753 * queue and process all the entries on that completion queue, rearm the 10754 * completion queue, and then return. 10755 * 10756 **/ 10757 static void 10758 lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe) 10759 { 10760 struct lpfc_queue *cq = NULL, *childq, *speq; 10761 struct lpfc_cqe *cqe; 10762 bool workposted = false; 10763 int ecount = 0; 10764 uint16_t cqid; 10765 10766 if (bf_get_le32(lpfc_eqe_major_code, eqe) != 0) { 10767 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 10768 "0359 Not a valid slow-path completion " 10769 "event: majorcode=x%x, minorcode=x%x\n", 10770 bf_get_le32(lpfc_eqe_major_code, eqe), 10771 bf_get_le32(lpfc_eqe_minor_code, eqe)); 10772 return; 10773 } 10774 10775 /* Get the reference to the corresponding CQ */ 10776 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); 10777 10778 /* Search for completion queue pointer matching this cqid */ 10779 speq = phba->sli4_hba.sp_eq; 10780 list_for_each_entry(childq, &speq->child_list, list) { 10781 if (childq->queue_id == cqid) { 10782 cq = childq; 10783 break; 10784 } 10785 } 10786 if (unlikely(!cq)) { 10787 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) 10788 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 10789 "0365 Slow-path CQ identifier " 10790 "(%d) does not exist\n", cqid); 10791 return; 10792 } 10793 10794 /* Process all the entries to the CQ */ 10795 switch (cq->type) { 10796 case LPFC_MCQ: 10797 while ((cqe = lpfc_sli4_cq_get(cq))) { 10798 workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe); 10799 if (!(++ecount % LPFC_GET_QE_REL_INT)) 10800 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); 10801 } 10802 break; 10803 case LPFC_WCQ: 10804 while ((cqe = lpfc_sli4_cq_get(cq))) { 10805 if (cq->subtype == LPFC_FCP) 10806 workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, 10807 cqe); 10808 else 10809 workposted |= lpfc_sli4_sp_handle_cqe(phba, cq, 10810 cqe); 10811 if (!(++ecount % LPFC_GET_QE_REL_INT)) 10812 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); 10813 } 10814 break; 10815 default: 10816 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 10817 "0370 Invalid completion queue type (%d)\n", 10818 cq->type); 10819 return; 10820 } 10821 10822 /* Catch the no cq entry condition, log an error */ 10823 if (unlikely(ecount == 0)) 10824 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 10825 "0371 No entry from the CQ: identifier " 10826 "(x%x), type (%d)\n", cq->queue_id, cq->type); 10827 10828 /* In any case, flash and re-arm the RCQ */ 10829 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM); 10830 10831 /* wake up worker thread if there are works to be done */ 10832 if (workposted) 10833 lpfc_worker_wake_up(phba); 10834 } 10835 10836 /** 10837 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry 10838 * @eqe: Pointer to fast-path completion queue entry. 10839 * 10840 * This routine process a fast-path work queue completion entry from fast-path 10841 * event queue for FCP command response completion. 10842 **/ 10843 static void 10844 lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, 10845 struct lpfc_wcqe_complete *wcqe) 10846 { 10847 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING]; 10848 struct lpfc_iocbq *cmdiocbq; 10849 struct lpfc_iocbq irspiocbq; 10850 unsigned long iflags; 10851 10852 spin_lock_irqsave(&phba->hbalock, iflags); 10853 pring->stats.iocb_event++; 10854 spin_unlock_irqrestore(&phba->hbalock, iflags); 10855 10856 /* Check for response status */ 10857 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) { 10858 /* If resource errors reported from HBA, reduce queue 10859 * depth of the SCSI device. 10860 */ 10861 if ((bf_get(lpfc_wcqe_c_status, wcqe) == 10862 IOSTAT_LOCAL_REJECT) && 10863 (wcqe->parameter == IOERR_NO_RESOURCES)) { 10864 phba->lpfc_rampdown_queue_depth(phba); 10865 } 10866 /* Log the error status */ 10867 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 10868 "0373 FCP complete error: status=x%x, " 10869 "hw_status=x%x, total_data_specified=%d, " 10870 "parameter=x%x, word3=x%x\n", 10871 bf_get(lpfc_wcqe_c_status, wcqe), 10872 bf_get(lpfc_wcqe_c_hw_status, wcqe), 10873 wcqe->total_data_placed, wcqe->parameter, 10874 wcqe->word3); 10875 } 10876 10877 /* Look up the FCP command IOCB and create pseudo response IOCB */ 10878 spin_lock_irqsave(&phba->hbalock, iflags); 10879 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, 10880 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 10881 spin_unlock_irqrestore(&phba->hbalock, iflags); 10882 if (unlikely(!cmdiocbq)) { 10883 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 10884 "0374 FCP complete with no corresponding " 10885 "cmdiocb: iotag (%d)\n", 10886 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 10887 return; 10888 } 10889 if (unlikely(!cmdiocbq->iocb_cmpl)) { 10890 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 10891 "0375 FCP cmdiocb not callback function " 10892 "iotag: (%d)\n", 10893 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 10894 return; 10895 } 10896 10897 /* Fake the irspiocb and copy necessary response information */ 10898 lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe); 10899 10900 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) { 10901 spin_lock_irqsave(&phba->hbalock, iflags); 10902 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED; 10903 spin_unlock_irqrestore(&phba->hbalock, iflags); 10904 } 10905 10906 /* Pass the cmd_iocb and the rsp state to the upper layer */ 10907 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq); 10908 } 10909 10910 /** 10911 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event 10912 * @phba: Pointer to HBA context object. 10913 * @cq: Pointer to completion queue. 10914 * @wcqe: Pointer to work-queue completion queue entry. 10915 * 10916 * This routine handles an fast-path WQ entry comsumed event by invoking the 10917 * proper WQ release routine to the slow-path WQ. 10918 **/ 10919 static void 10920 lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 10921 struct lpfc_wcqe_release *wcqe) 10922 { 10923 struct lpfc_queue *childwq; 10924 bool wqid_matched = false; 10925 uint16_t fcp_wqid; 10926 10927 /* Check for fast-path FCP work queue release */ 10928 fcp_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe); 10929 list_for_each_entry(childwq, &cq->child_list, list) { 10930 if (childwq->queue_id == fcp_wqid) { 10931 lpfc_sli4_wq_release(childwq, 10932 bf_get(lpfc_wcqe_r_wqe_index, wcqe)); 10933 wqid_matched = true; 10934 break; 10935 } 10936 } 10937 /* Report warning log message if no match found */ 10938 if (wqid_matched != true) 10939 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 10940 "2580 Fast-path wqe consume event carries " 10941 "miss-matched qid: wcqe-qid=x%x\n", fcp_wqid); 10942 } 10943 10944 /** 10945 * lpfc_sli4_fp_handle_wcqe - Process fast-path work queue completion entry 10946 * @cq: Pointer to the completion queue. 10947 * @eqe: Pointer to fast-path completion queue entry. 10948 * 10949 * This routine process a fast-path work queue completion entry from fast-path 10950 * event queue for FCP command response completion. 10951 **/ 10952 static int 10953 lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 10954 struct lpfc_cqe *cqe) 10955 { 10956 struct lpfc_wcqe_release wcqe; 10957 bool workposted = false; 10958 10959 /* Copy the work queue CQE and convert endian order if needed */ 10960 lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe)); 10961 10962 /* Check and process for different type of WCQE and dispatch */ 10963 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) { 10964 case CQE_CODE_COMPL_WQE: 10965 /* Process the WQ complete event */ 10966 phba->last_completion_time = jiffies; 10967 lpfc_sli4_fp_handle_fcp_wcqe(phba, 10968 (struct lpfc_wcqe_complete *)&wcqe); 10969 break; 10970 case CQE_CODE_RELEASE_WQE: 10971 /* Process the WQ release event */ 10972 lpfc_sli4_fp_handle_rel_wcqe(phba, cq, 10973 (struct lpfc_wcqe_release *)&wcqe); 10974 break; 10975 case CQE_CODE_XRI_ABORTED: 10976 /* Process the WQ XRI abort event */ 10977 phba->last_completion_time = jiffies; 10978 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq, 10979 (struct sli4_wcqe_xri_aborted *)&wcqe); 10980 break; 10981 default: 10982 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 10983 "0144 Not a valid WCQE code: x%x\n", 10984 bf_get(lpfc_wcqe_c_code, &wcqe)); 10985 break; 10986 } 10987 return workposted; 10988 } 10989 10990 /** 10991 * lpfc_sli4_fp_handle_eqe - Process a fast-path event queue entry 10992 * @phba: Pointer to HBA context object. 10993 * @eqe: Pointer to fast-path event queue entry. 10994 * 10995 * This routine process a event queue entry from the fast-path event queue. 10996 * It will check the MajorCode and MinorCode to determine this is for a 10997 * completion event on a completion queue, if not, an error shall be logged 10998 * and just return. Otherwise, it will get to the corresponding completion 10999 * queue and process all the entries on the completion queue, rearm the 11000 * completion queue, and then return. 11001 **/ 11002 static void 11003 lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, 11004 uint32_t fcp_cqidx) 11005 { 11006 struct lpfc_queue *cq; 11007 struct lpfc_cqe *cqe; 11008 bool workposted = false; 11009 uint16_t cqid; 11010 int ecount = 0; 11011 11012 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) { 11013 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11014 "0366 Not a valid fast-path completion " 11015 "event: majorcode=x%x, minorcode=x%x\n", 11016 bf_get_le32(lpfc_eqe_major_code, eqe), 11017 bf_get_le32(lpfc_eqe_minor_code, eqe)); 11018 return; 11019 } 11020 11021 cq = phba->sli4_hba.fcp_cq[fcp_cqidx]; 11022 if (unlikely(!cq)) { 11023 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) 11024 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11025 "0367 Fast-path completion queue " 11026 "does not exist\n"); 11027 return; 11028 } 11029 11030 /* Get the reference to the corresponding CQ */ 11031 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); 11032 if (unlikely(cqid != cq->queue_id)) { 11033 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11034 "0368 Miss-matched fast-path completion " 11035 "queue identifier: eqcqid=%d, fcpcqid=%d\n", 11036 cqid, cq->queue_id); 11037 return; 11038 } 11039 11040 /* Process all the entries to the CQ */ 11041 while ((cqe = lpfc_sli4_cq_get(cq))) { 11042 workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe); 11043 if (!(++ecount % LPFC_GET_QE_REL_INT)) 11044 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); 11045 } 11046 11047 /* Catch the no cq entry condition */ 11048 if (unlikely(ecount == 0)) 11049 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11050 "0369 No entry from fast-path completion " 11051 "queue fcpcqid=%d\n", cq->queue_id); 11052 11053 /* In any case, flash and re-arm the CQ */ 11054 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM); 11055 11056 /* wake up worker thread if there are works to be done */ 11057 if (workposted) 11058 lpfc_worker_wake_up(phba); 11059 } 11060 11061 static void 11062 lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq) 11063 { 11064 struct lpfc_eqe *eqe; 11065 11066 /* walk all the EQ entries and drop on the floor */ 11067 while ((eqe = lpfc_sli4_eq_get(eq))) 11068 ; 11069 11070 /* Clear and re-arm the EQ */ 11071 lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM); 11072 } 11073 11074 /** 11075 * lpfc_sli4_sp_intr_handler - Slow-path interrupt handler to SLI-4 device 11076 * @irq: Interrupt number. 11077 * @dev_id: The device context pointer. 11078 * 11079 * This function is directly called from the PCI layer as an interrupt 11080 * service routine when device with SLI-4 interface spec is enabled with 11081 * MSI-X multi-message interrupt mode and there are slow-path events in 11082 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ 11083 * interrupt mode, this function is called as part of the device-level 11084 * interrupt handler. When the PCI slot is in error recovery or the HBA is 11085 * undergoing initialization, the interrupt handler will not process the 11086 * interrupt. The link attention and ELS ring attention events are handled 11087 * by the worker thread. The interrupt handler signals the worker thread 11088 * and returns for these events. This function is called without any lock 11089 * held. It gets the hbalock to access and update SLI data structures. 11090 * 11091 * This function returns IRQ_HANDLED when interrupt is handled else it 11092 * returns IRQ_NONE. 11093 **/ 11094 irqreturn_t 11095 lpfc_sli4_sp_intr_handler(int irq, void *dev_id) 11096 { 11097 struct lpfc_hba *phba; 11098 struct lpfc_queue *speq; 11099 struct lpfc_eqe *eqe; 11100 unsigned long iflag; 11101 int ecount = 0; 11102 11103 /* 11104 * Get the driver's phba structure from the dev_id 11105 */ 11106 phba = (struct lpfc_hba *)dev_id; 11107 11108 if (unlikely(!phba)) 11109 return IRQ_NONE; 11110 11111 /* Get to the EQ struct associated with this vector */ 11112 speq = phba->sli4_hba.sp_eq; 11113 11114 /* Check device state for handling interrupt */ 11115 if (unlikely(lpfc_intr_state_check(phba))) { 11116 /* Check again for link_state with lock held */ 11117 spin_lock_irqsave(&phba->hbalock, iflag); 11118 if (phba->link_state < LPFC_LINK_DOWN) 11119 /* Flush, clear interrupt, and rearm the EQ */ 11120 lpfc_sli4_eq_flush(phba, speq); 11121 spin_unlock_irqrestore(&phba->hbalock, iflag); 11122 return IRQ_NONE; 11123 } 11124 11125 /* 11126 * Process all the event on FCP slow-path EQ 11127 */ 11128 while ((eqe = lpfc_sli4_eq_get(speq))) { 11129 lpfc_sli4_sp_handle_eqe(phba, eqe); 11130 if (!(++ecount % LPFC_GET_QE_REL_INT)) 11131 lpfc_sli4_eq_release(speq, LPFC_QUEUE_NOARM); 11132 } 11133 11134 /* Always clear and re-arm the slow-path EQ */ 11135 lpfc_sli4_eq_release(speq, LPFC_QUEUE_REARM); 11136 11137 /* Catch the no cq entry condition */ 11138 if (unlikely(ecount == 0)) { 11139 if (phba->intr_type == MSIX) 11140 /* MSI-X treated interrupt served as no EQ share INT */ 11141 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 11142 "0357 MSI-X interrupt with no EQE\n"); 11143 else 11144 /* Non MSI-X treated on interrupt as EQ share INT */ 11145 return IRQ_NONE; 11146 } 11147 11148 return IRQ_HANDLED; 11149 } /* lpfc_sli4_sp_intr_handler */ 11150 11151 /** 11152 * lpfc_sli4_fp_intr_handler - Fast-path interrupt handler to SLI-4 device 11153 * @irq: Interrupt number. 11154 * @dev_id: The device context pointer. 11155 * 11156 * This function is directly called from the PCI layer as an interrupt 11157 * service routine when device with SLI-4 interface spec is enabled with 11158 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB 11159 * ring event in the HBA. However, when the device is enabled with either 11160 * MSI or Pin-IRQ interrupt mode, this function is called as part of the 11161 * device-level interrupt handler. When the PCI slot is in error recovery 11162 * or the HBA is undergoing initialization, the interrupt handler will not 11163 * process the interrupt. The SCSI FCP fast-path ring event are handled in 11164 * the intrrupt context. This function is called without any lock held. 11165 * It gets the hbalock to access and update SLI data structures. Note that, 11166 * the FCP EQ to FCP CQ are one-to-one map such that the FCP EQ index is 11167 * equal to that of FCP CQ index. 11168 * 11169 * This function returns IRQ_HANDLED when interrupt is handled else it 11170 * returns IRQ_NONE. 11171 **/ 11172 irqreturn_t 11173 lpfc_sli4_fp_intr_handler(int irq, void *dev_id) 11174 { 11175 struct lpfc_hba *phba; 11176 struct lpfc_fcp_eq_hdl *fcp_eq_hdl; 11177 struct lpfc_queue *fpeq; 11178 struct lpfc_eqe *eqe; 11179 unsigned long iflag; 11180 int ecount = 0; 11181 uint32_t fcp_eqidx; 11182 11183 /* Get the driver's phba structure from the dev_id */ 11184 fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id; 11185 phba = fcp_eq_hdl->phba; 11186 fcp_eqidx = fcp_eq_hdl->idx; 11187 11188 if (unlikely(!phba)) 11189 return IRQ_NONE; 11190 11191 /* Get to the EQ struct associated with this vector */ 11192 fpeq = phba->sli4_hba.fp_eq[fcp_eqidx]; 11193 11194 /* Check device state for handling interrupt */ 11195 if (unlikely(lpfc_intr_state_check(phba))) { 11196 /* Check again for link_state with lock held */ 11197 spin_lock_irqsave(&phba->hbalock, iflag); 11198 if (phba->link_state < LPFC_LINK_DOWN) 11199 /* Flush, clear interrupt, and rearm the EQ */ 11200 lpfc_sli4_eq_flush(phba, fpeq); 11201 spin_unlock_irqrestore(&phba->hbalock, iflag); 11202 return IRQ_NONE; 11203 } 11204 11205 /* 11206 * Process all the event on FCP fast-path EQ 11207 */ 11208 while ((eqe = lpfc_sli4_eq_get(fpeq))) { 11209 lpfc_sli4_fp_handle_eqe(phba, eqe, fcp_eqidx); 11210 if (!(++ecount % LPFC_GET_QE_REL_INT)) 11211 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM); 11212 } 11213 11214 /* Always clear and re-arm the fast-path EQ */ 11215 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM); 11216 11217 if (unlikely(ecount == 0)) { 11218 if (phba->intr_type == MSIX) 11219 /* MSI-X treated interrupt served as no EQ share INT */ 11220 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 11221 "0358 MSI-X interrupt with no EQE\n"); 11222 else 11223 /* Non MSI-X treated on interrupt as EQ share INT */ 11224 return IRQ_NONE; 11225 } 11226 11227 return IRQ_HANDLED; 11228 } /* lpfc_sli4_fp_intr_handler */ 11229 11230 /** 11231 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device 11232 * @irq: Interrupt number. 11233 * @dev_id: The device context pointer. 11234 * 11235 * This function is the device-level interrupt handler to device with SLI-4 11236 * interface spec, called from the PCI layer when either MSI or Pin-IRQ 11237 * interrupt mode is enabled and there is an event in the HBA which requires 11238 * driver attention. This function invokes the slow-path interrupt attention 11239 * handling function and fast-path interrupt attention handling function in 11240 * turn to process the relevant HBA attention events. This function is called 11241 * without any lock held. It gets the hbalock to access and update SLI data 11242 * structures. 11243 * 11244 * This function returns IRQ_HANDLED when interrupt is handled, else it 11245 * returns IRQ_NONE. 11246 **/ 11247 irqreturn_t 11248 lpfc_sli4_intr_handler(int irq, void *dev_id) 11249 { 11250 struct lpfc_hba *phba; 11251 irqreturn_t sp_irq_rc, fp_irq_rc; 11252 bool fp_handled = false; 11253 uint32_t fcp_eqidx; 11254 11255 /* Get the driver's phba structure from the dev_id */ 11256 phba = (struct lpfc_hba *)dev_id; 11257 11258 if (unlikely(!phba)) 11259 return IRQ_NONE; 11260 11261 /* 11262 * Invokes slow-path host attention interrupt handling as appropriate. 11263 */ 11264 sp_irq_rc = lpfc_sli4_sp_intr_handler(irq, dev_id); 11265 11266 /* 11267 * Invoke fast-path host attention interrupt handling as appropriate. 11268 */ 11269 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { 11270 fp_irq_rc = lpfc_sli4_fp_intr_handler(irq, 11271 &phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]); 11272 if (fp_irq_rc == IRQ_HANDLED) 11273 fp_handled |= true; 11274 } 11275 11276 return (fp_handled == true) ? IRQ_HANDLED : sp_irq_rc; 11277 } /* lpfc_sli4_intr_handler */ 11278 11279 /** 11280 * lpfc_sli4_queue_free - free a queue structure and associated memory 11281 * @queue: The queue structure to free. 11282 * 11283 * This function frees a queue structure and the DMAable memory used for 11284 * the host resident queue. This function must be called after destroying the 11285 * queue on the HBA. 11286 **/ 11287 void 11288 lpfc_sli4_queue_free(struct lpfc_queue *queue) 11289 { 11290 struct lpfc_dmabuf *dmabuf; 11291 11292 if (!queue) 11293 return; 11294 11295 while (!list_empty(&queue->page_list)) { 11296 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf, 11297 list); 11298 dma_free_coherent(&queue->phba->pcidev->dev, SLI4_PAGE_SIZE, 11299 dmabuf->virt, dmabuf->phys); 11300 kfree(dmabuf); 11301 } 11302 kfree(queue); 11303 return; 11304 } 11305 11306 /** 11307 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure 11308 * @phba: The HBA that this queue is being created on. 11309 * @entry_size: The size of each queue entry for this queue. 11310 * @entry count: The number of entries that this queue will handle. 11311 * 11312 * This function allocates a queue structure and the DMAable memory used for 11313 * the host resident queue. This function must be called before creating the 11314 * queue on the HBA. 11315 **/ 11316 struct lpfc_queue * 11317 lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size, 11318 uint32_t entry_count) 11319 { 11320 struct lpfc_queue *queue; 11321 struct lpfc_dmabuf *dmabuf; 11322 int x, total_qe_count; 11323 void *dma_pointer; 11324 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 11325 11326 if (!phba->sli4_hba.pc_sli4_params.supported) 11327 hw_page_size = SLI4_PAGE_SIZE; 11328 11329 queue = kzalloc(sizeof(struct lpfc_queue) + 11330 (sizeof(union sli4_qe) * entry_count), GFP_KERNEL); 11331 if (!queue) 11332 return NULL; 11333 queue->page_count = (ALIGN(entry_size * entry_count, 11334 hw_page_size))/hw_page_size; 11335 INIT_LIST_HEAD(&queue->list); 11336 INIT_LIST_HEAD(&queue->page_list); 11337 INIT_LIST_HEAD(&queue->child_list); 11338 for (x = 0, total_qe_count = 0; x < queue->page_count; x++) { 11339 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 11340 if (!dmabuf) 11341 goto out_fail; 11342 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 11343 hw_page_size, &dmabuf->phys, 11344 GFP_KERNEL); 11345 if (!dmabuf->virt) { 11346 kfree(dmabuf); 11347 goto out_fail; 11348 } 11349 memset(dmabuf->virt, 0, hw_page_size); 11350 dmabuf->buffer_tag = x; 11351 list_add_tail(&dmabuf->list, &queue->page_list); 11352 /* initialize queue's entry array */ 11353 dma_pointer = dmabuf->virt; 11354 for (; total_qe_count < entry_count && 11355 dma_pointer < (hw_page_size + dmabuf->virt); 11356 total_qe_count++, dma_pointer += entry_size) { 11357 queue->qe[total_qe_count].address = dma_pointer; 11358 } 11359 } 11360 queue->entry_size = entry_size; 11361 queue->entry_count = entry_count; 11362 queue->phba = phba; 11363 11364 return queue; 11365 out_fail: 11366 lpfc_sli4_queue_free(queue); 11367 return NULL; 11368 } 11369 11370 /** 11371 * lpfc_eq_create - Create an Event Queue on the HBA 11372 * @phba: HBA structure that indicates port to create a queue on. 11373 * @eq: The queue structure to use to create the event queue. 11374 * @imax: The maximum interrupt per second limit. 11375 * 11376 * This function creates an event queue, as detailed in @eq, on a port, 11377 * described by @phba by sending an EQ_CREATE mailbox command to the HBA. 11378 * 11379 * The @phba struct is used to send mailbox command to HBA. The @eq struct 11380 * is used to get the entry count and entry size that are necessary to 11381 * determine the number of pages to allocate and use for this queue. This 11382 * function will send the EQ_CREATE mailbox command to the HBA to setup the 11383 * event queue. This function is asynchronous and will wait for the mailbox 11384 * command to finish before continuing. 11385 * 11386 * On success this function will return a zero. If unable to allocate enough 11387 * memory this function will return -ENOMEM. If the queue create mailbox command 11388 * fails this function will return -ENXIO. 11389 **/ 11390 uint32_t 11391 lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax) 11392 { 11393 struct lpfc_mbx_eq_create *eq_create; 11394 LPFC_MBOXQ_t *mbox; 11395 int rc, length, status = 0; 11396 struct lpfc_dmabuf *dmabuf; 11397 uint32_t shdr_status, shdr_add_status; 11398 union lpfc_sli4_cfg_shdr *shdr; 11399 uint16_t dmult; 11400 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 11401 11402 if (!phba->sli4_hba.pc_sli4_params.supported) 11403 hw_page_size = SLI4_PAGE_SIZE; 11404 11405 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 11406 if (!mbox) 11407 return -ENOMEM; 11408 length = (sizeof(struct lpfc_mbx_eq_create) - 11409 sizeof(struct lpfc_sli4_cfg_mhdr)); 11410 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 11411 LPFC_MBOX_OPCODE_EQ_CREATE, 11412 length, LPFC_SLI4_MBX_EMBED); 11413 eq_create = &mbox->u.mqe.un.eq_create; 11414 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request, 11415 eq->page_count); 11416 bf_set(lpfc_eq_context_size, &eq_create->u.request.context, 11417 LPFC_EQE_SIZE); 11418 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1); 11419 /* Calculate delay multiper from maximum interrupt per second */ 11420 dmult = LPFC_DMULT_CONST/imax - 1; 11421 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context, 11422 dmult); 11423 switch (eq->entry_count) { 11424 default: 11425 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11426 "0360 Unsupported EQ count. (%d)\n", 11427 eq->entry_count); 11428 if (eq->entry_count < 256) 11429 return -EINVAL; 11430 /* otherwise default to smallest count (drop through) */ 11431 case 256: 11432 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 11433 LPFC_EQ_CNT_256); 11434 break; 11435 case 512: 11436 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 11437 LPFC_EQ_CNT_512); 11438 break; 11439 case 1024: 11440 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 11441 LPFC_EQ_CNT_1024); 11442 break; 11443 case 2048: 11444 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 11445 LPFC_EQ_CNT_2048); 11446 break; 11447 case 4096: 11448 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 11449 LPFC_EQ_CNT_4096); 11450 break; 11451 } 11452 list_for_each_entry(dmabuf, &eq->page_list, list) { 11453 memset(dmabuf->virt, 0, hw_page_size); 11454 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 11455 putPaddrLow(dmabuf->phys); 11456 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 11457 putPaddrHigh(dmabuf->phys); 11458 } 11459 mbox->vport = phba->pport; 11460 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 11461 mbox->context1 = NULL; 11462 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 11463 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr; 11464 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 11465 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 11466 if (shdr_status || shdr_add_status || rc) { 11467 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11468 "2500 EQ_CREATE mailbox failed with " 11469 "status x%x add_status x%x, mbx status x%x\n", 11470 shdr_status, shdr_add_status, rc); 11471 status = -ENXIO; 11472 } 11473 eq->type = LPFC_EQ; 11474 eq->subtype = LPFC_NONE; 11475 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response); 11476 if (eq->queue_id == 0xFFFF) 11477 status = -ENXIO; 11478 eq->host_index = 0; 11479 eq->hba_index = 0; 11480 11481 mempool_free(mbox, phba->mbox_mem_pool); 11482 return status; 11483 } 11484 11485 /** 11486 * lpfc_cq_create - Create a Completion Queue on the HBA 11487 * @phba: HBA structure that indicates port to create a queue on. 11488 * @cq: The queue structure to use to create the completion queue. 11489 * @eq: The event queue to bind this completion queue to. 11490 * 11491 * This function creates a completion queue, as detailed in @wq, on a port, 11492 * described by @phba by sending a CQ_CREATE mailbox command to the HBA. 11493 * 11494 * The @phba struct is used to send mailbox command to HBA. The @cq struct 11495 * is used to get the entry count and entry size that are necessary to 11496 * determine the number of pages to allocate and use for this queue. The @eq 11497 * is used to indicate which event queue to bind this completion queue to. This 11498 * function will send the CQ_CREATE mailbox command to the HBA to setup the 11499 * completion queue. This function is asynchronous and will wait for the mailbox 11500 * command to finish before continuing. 11501 * 11502 * On success this function will return a zero. If unable to allocate enough 11503 * memory this function will return -ENOMEM. If the queue create mailbox command 11504 * fails this function will return -ENXIO. 11505 **/ 11506 uint32_t 11507 lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, 11508 struct lpfc_queue *eq, uint32_t type, uint32_t subtype) 11509 { 11510 struct lpfc_mbx_cq_create *cq_create; 11511 struct lpfc_dmabuf *dmabuf; 11512 LPFC_MBOXQ_t *mbox; 11513 int rc, length, status = 0; 11514 uint32_t shdr_status, shdr_add_status; 11515 union lpfc_sli4_cfg_shdr *shdr; 11516 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 11517 11518 if (!phba->sli4_hba.pc_sli4_params.supported) 11519 hw_page_size = SLI4_PAGE_SIZE; 11520 11521 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 11522 if (!mbox) 11523 return -ENOMEM; 11524 length = (sizeof(struct lpfc_mbx_cq_create) - 11525 sizeof(struct lpfc_sli4_cfg_mhdr)); 11526 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 11527 LPFC_MBOX_OPCODE_CQ_CREATE, 11528 length, LPFC_SLI4_MBX_EMBED); 11529 cq_create = &mbox->u.mqe.un.cq_create; 11530 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr; 11531 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request, 11532 cq->page_count); 11533 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1); 11534 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1); 11535 bf_set(lpfc_mbox_hdr_version, &shdr->request, 11536 phba->sli4_hba.pc_sli4_params.cqv); 11537 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) { 11538 /* FW only supports 1. Should be PAGE_SIZE/SLI4_PAGE_SIZE */ 11539 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, 1); 11540 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context, 11541 eq->queue_id); 11542 } else { 11543 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, 11544 eq->queue_id); 11545 } 11546 switch (cq->entry_count) { 11547 default: 11548 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11549 "0361 Unsupported CQ count. (%d)\n", 11550 cq->entry_count); 11551 if (cq->entry_count < 256) 11552 return -EINVAL; 11553 /* otherwise default to smallest count (drop through) */ 11554 case 256: 11555 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 11556 LPFC_CQ_CNT_256); 11557 break; 11558 case 512: 11559 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 11560 LPFC_CQ_CNT_512); 11561 break; 11562 case 1024: 11563 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 11564 LPFC_CQ_CNT_1024); 11565 break; 11566 } 11567 list_for_each_entry(dmabuf, &cq->page_list, list) { 11568 memset(dmabuf->virt, 0, hw_page_size); 11569 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 11570 putPaddrLow(dmabuf->phys); 11571 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 11572 putPaddrHigh(dmabuf->phys); 11573 } 11574 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 11575 11576 /* The IOCTL status is embedded in the mailbox subheader. */ 11577 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 11578 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 11579 if (shdr_status || shdr_add_status || rc) { 11580 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11581 "2501 CQ_CREATE mailbox failed with " 11582 "status x%x add_status x%x, mbx status x%x\n", 11583 shdr_status, shdr_add_status, rc); 11584 status = -ENXIO; 11585 goto out; 11586 } 11587 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response); 11588 if (cq->queue_id == 0xFFFF) { 11589 status = -ENXIO; 11590 goto out; 11591 } 11592 /* link the cq onto the parent eq child list */ 11593 list_add_tail(&cq->list, &eq->child_list); 11594 /* Set up completion queue's type and subtype */ 11595 cq->type = type; 11596 cq->subtype = subtype; 11597 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response); 11598 cq->assoc_qid = eq->queue_id; 11599 cq->host_index = 0; 11600 cq->hba_index = 0; 11601 11602 out: 11603 mempool_free(mbox, phba->mbox_mem_pool); 11604 return status; 11605 } 11606 11607 /** 11608 * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration 11609 * @phba: HBA structure that indicates port to create a queue on. 11610 * @mq: The queue structure to use to create the mailbox queue. 11611 * @mbox: An allocated pointer to type LPFC_MBOXQ_t 11612 * @cq: The completion queue to associate with this cq. 11613 * 11614 * This function provides failback (fb) functionality when the 11615 * mq_create_ext fails on older FW generations. It's purpose is identical 11616 * to mq_create_ext otherwise. 11617 * 11618 * This routine cannot fail as all attributes were previously accessed and 11619 * initialized in mq_create_ext. 11620 **/ 11621 static void 11622 lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq, 11623 LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq) 11624 { 11625 struct lpfc_mbx_mq_create *mq_create; 11626 struct lpfc_dmabuf *dmabuf; 11627 int length; 11628 11629 length = (sizeof(struct lpfc_mbx_mq_create) - 11630 sizeof(struct lpfc_sli4_cfg_mhdr)); 11631 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 11632 LPFC_MBOX_OPCODE_MQ_CREATE, 11633 length, LPFC_SLI4_MBX_EMBED); 11634 mq_create = &mbox->u.mqe.un.mq_create; 11635 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request, 11636 mq->page_count); 11637 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context, 11638 cq->queue_id); 11639 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1); 11640 switch (mq->entry_count) { 11641 case 16: 11642 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, 11643 LPFC_MQ_RING_SIZE_16); 11644 break; 11645 case 32: 11646 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, 11647 LPFC_MQ_RING_SIZE_32); 11648 break; 11649 case 64: 11650 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, 11651 LPFC_MQ_RING_SIZE_64); 11652 break; 11653 case 128: 11654 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, 11655 LPFC_MQ_RING_SIZE_128); 11656 break; 11657 } 11658 list_for_each_entry(dmabuf, &mq->page_list, list) { 11659 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 11660 putPaddrLow(dmabuf->phys); 11661 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 11662 putPaddrHigh(dmabuf->phys); 11663 } 11664 } 11665 11666 /** 11667 * lpfc_mq_create - Create a mailbox Queue on the HBA 11668 * @phba: HBA structure that indicates port to create a queue on. 11669 * @mq: The queue structure to use to create the mailbox queue. 11670 * @cq: The completion queue to associate with this cq. 11671 * @subtype: The queue's subtype. 11672 * 11673 * This function creates a mailbox queue, as detailed in @mq, on a port, 11674 * described by @phba by sending a MQ_CREATE mailbox command to the HBA. 11675 * 11676 * The @phba struct is used to send mailbox command to HBA. The @cq struct 11677 * is used to get the entry count and entry size that are necessary to 11678 * determine the number of pages to allocate and use for this queue. This 11679 * function will send the MQ_CREATE mailbox command to the HBA to setup the 11680 * mailbox queue. This function is asynchronous and will wait for the mailbox 11681 * command to finish before continuing. 11682 * 11683 * On success this function will return a zero. If unable to allocate enough 11684 * memory this function will return -ENOMEM. If the queue create mailbox command 11685 * fails this function will return -ENXIO. 11686 **/ 11687 int32_t 11688 lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, 11689 struct lpfc_queue *cq, uint32_t subtype) 11690 { 11691 struct lpfc_mbx_mq_create *mq_create; 11692 struct lpfc_mbx_mq_create_ext *mq_create_ext; 11693 struct lpfc_dmabuf *dmabuf; 11694 LPFC_MBOXQ_t *mbox; 11695 int rc, length, status = 0; 11696 uint32_t shdr_status, shdr_add_status; 11697 union lpfc_sli4_cfg_shdr *shdr; 11698 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 11699 11700 if (!phba->sli4_hba.pc_sli4_params.supported) 11701 hw_page_size = SLI4_PAGE_SIZE; 11702 11703 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 11704 if (!mbox) 11705 return -ENOMEM; 11706 length = (sizeof(struct lpfc_mbx_mq_create_ext) - 11707 sizeof(struct lpfc_sli4_cfg_mhdr)); 11708 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 11709 LPFC_MBOX_OPCODE_MQ_CREATE_EXT, 11710 length, LPFC_SLI4_MBX_EMBED); 11711 11712 mq_create_ext = &mbox->u.mqe.un.mq_create_ext; 11713 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr; 11714 bf_set(lpfc_mbx_mq_create_ext_num_pages, 11715 &mq_create_ext->u.request, mq->page_count); 11716 bf_set(lpfc_mbx_mq_create_ext_async_evt_link, 11717 &mq_create_ext->u.request, 1); 11718 bf_set(lpfc_mbx_mq_create_ext_async_evt_fip, 11719 &mq_create_ext->u.request, 1); 11720 bf_set(lpfc_mbx_mq_create_ext_async_evt_group5, 11721 &mq_create_ext->u.request, 1); 11722 bf_set(lpfc_mbx_mq_create_ext_async_evt_fc, 11723 &mq_create_ext->u.request, 1); 11724 bf_set(lpfc_mbx_mq_create_ext_async_evt_sli, 11725 &mq_create_ext->u.request, 1); 11726 bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1); 11727 bf_set(lpfc_mbox_hdr_version, &shdr->request, 11728 phba->sli4_hba.pc_sli4_params.mqv); 11729 if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1) 11730 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request, 11731 cq->queue_id); 11732 else 11733 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context, 11734 cq->queue_id); 11735 switch (mq->entry_count) { 11736 default: 11737 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11738 "0362 Unsupported MQ count. (%d)\n", 11739 mq->entry_count); 11740 if (mq->entry_count < 16) 11741 return -EINVAL; 11742 /* otherwise default to smallest count (drop through) */ 11743 case 16: 11744 bf_set(lpfc_mq_context_ring_size, 11745 &mq_create_ext->u.request.context, 11746 LPFC_MQ_RING_SIZE_16); 11747 break; 11748 case 32: 11749 bf_set(lpfc_mq_context_ring_size, 11750 &mq_create_ext->u.request.context, 11751 LPFC_MQ_RING_SIZE_32); 11752 break; 11753 case 64: 11754 bf_set(lpfc_mq_context_ring_size, 11755 &mq_create_ext->u.request.context, 11756 LPFC_MQ_RING_SIZE_64); 11757 break; 11758 case 128: 11759 bf_set(lpfc_mq_context_ring_size, 11760 &mq_create_ext->u.request.context, 11761 LPFC_MQ_RING_SIZE_128); 11762 break; 11763 } 11764 list_for_each_entry(dmabuf, &mq->page_list, list) { 11765 memset(dmabuf->virt, 0, hw_page_size); 11766 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo = 11767 putPaddrLow(dmabuf->phys); 11768 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi = 11769 putPaddrHigh(dmabuf->phys); 11770 } 11771 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 11772 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, 11773 &mq_create_ext->u.response); 11774 if (rc != MBX_SUCCESS) { 11775 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 11776 "2795 MQ_CREATE_EXT failed with " 11777 "status x%x. Failback to MQ_CREATE.\n", 11778 rc); 11779 lpfc_mq_create_fb_init(phba, mq, mbox, cq); 11780 mq_create = &mbox->u.mqe.un.mq_create; 11781 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 11782 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr; 11783 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, 11784 &mq_create->u.response); 11785 } 11786 11787 /* The IOCTL status is embedded in the mailbox subheader. */ 11788 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 11789 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 11790 if (shdr_status || shdr_add_status || rc) { 11791 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11792 "2502 MQ_CREATE mailbox failed with " 11793 "status x%x add_status x%x, mbx status x%x\n", 11794 shdr_status, shdr_add_status, rc); 11795 status = -ENXIO; 11796 goto out; 11797 } 11798 if (mq->queue_id == 0xFFFF) { 11799 status = -ENXIO; 11800 goto out; 11801 } 11802 mq->type = LPFC_MQ; 11803 mq->assoc_qid = cq->queue_id; 11804 mq->subtype = subtype; 11805 mq->host_index = 0; 11806 mq->hba_index = 0; 11807 11808 /* link the mq onto the parent cq child list */ 11809 list_add_tail(&mq->list, &cq->child_list); 11810 out: 11811 mempool_free(mbox, phba->mbox_mem_pool); 11812 return status; 11813 } 11814 11815 /** 11816 * lpfc_wq_create - Create a Work Queue on the HBA 11817 * @phba: HBA structure that indicates port to create a queue on. 11818 * @wq: The queue structure to use to create the work queue. 11819 * @cq: The completion queue to bind this work queue to. 11820 * @subtype: The subtype of the work queue indicating its functionality. 11821 * 11822 * This function creates a work queue, as detailed in @wq, on a port, described 11823 * by @phba by sending a WQ_CREATE mailbox command to the HBA. 11824 * 11825 * The @phba struct is used to send mailbox command to HBA. The @wq struct 11826 * is used to get the entry count and entry size that are necessary to 11827 * determine the number of pages to allocate and use for this queue. The @cq 11828 * is used to indicate which completion queue to bind this work queue to. This 11829 * function will send the WQ_CREATE mailbox command to the HBA to setup the 11830 * work queue. This function is asynchronous and will wait for the mailbox 11831 * command to finish before continuing. 11832 * 11833 * On success this function will return a zero. If unable to allocate enough 11834 * memory this function will return -ENOMEM. If the queue create mailbox command 11835 * fails this function will return -ENXIO. 11836 **/ 11837 uint32_t 11838 lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, 11839 struct lpfc_queue *cq, uint32_t subtype) 11840 { 11841 struct lpfc_mbx_wq_create *wq_create; 11842 struct lpfc_dmabuf *dmabuf; 11843 LPFC_MBOXQ_t *mbox; 11844 int rc, length, status = 0; 11845 uint32_t shdr_status, shdr_add_status; 11846 union lpfc_sli4_cfg_shdr *shdr; 11847 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 11848 struct dma_address *page; 11849 11850 if (!phba->sli4_hba.pc_sli4_params.supported) 11851 hw_page_size = SLI4_PAGE_SIZE; 11852 11853 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 11854 if (!mbox) 11855 return -ENOMEM; 11856 length = (sizeof(struct lpfc_mbx_wq_create) - 11857 sizeof(struct lpfc_sli4_cfg_mhdr)); 11858 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 11859 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE, 11860 length, LPFC_SLI4_MBX_EMBED); 11861 wq_create = &mbox->u.mqe.un.wq_create; 11862 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr; 11863 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request, 11864 wq->page_count); 11865 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request, 11866 cq->queue_id); 11867 bf_set(lpfc_mbox_hdr_version, &shdr->request, 11868 phba->sli4_hba.pc_sli4_params.wqv); 11869 if (phba->sli4_hba.pc_sli4_params.wqv == LPFC_Q_CREATE_VERSION_1) { 11870 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1, 11871 wq->entry_count); 11872 switch (wq->entry_size) { 11873 default: 11874 case 64: 11875 bf_set(lpfc_mbx_wq_create_wqe_size, 11876 &wq_create->u.request_1, 11877 LPFC_WQ_WQE_SIZE_64); 11878 break; 11879 case 128: 11880 bf_set(lpfc_mbx_wq_create_wqe_size, 11881 &wq_create->u.request_1, 11882 LPFC_WQ_WQE_SIZE_128); 11883 break; 11884 } 11885 bf_set(lpfc_mbx_wq_create_page_size, &wq_create->u.request_1, 11886 (PAGE_SIZE/SLI4_PAGE_SIZE)); 11887 page = wq_create->u.request_1.page; 11888 } else { 11889 page = wq_create->u.request.page; 11890 } 11891 list_for_each_entry(dmabuf, &wq->page_list, list) { 11892 memset(dmabuf->virt, 0, hw_page_size); 11893 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys); 11894 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys); 11895 } 11896 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 11897 /* The IOCTL status is embedded in the mailbox subheader. */ 11898 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 11899 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 11900 if (shdr_status || shdr_add_status || rc) { 11901 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11902 "2503 WQ_CREATE mailbox failed with " 11903 "status x%x add_status x%x, mbx status x%x\n", 11904 shdr_status, shdr_add_status, rc); 11905 status = -ENXIO; 11906 goto out; 11907 } 11908 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, &wq_create->u.response); 11909 if (wq->queue_id == 0xFFFF) { 11910 status = -ENXIO; 11911 goto out; 11912 } 11913 wq->type = LPFC_WQ; 11914 wq->assoc_qid = cq->queue_id; 11915 wq->subtype = subtype; 11916 wq->host_index = 0; 11917 wq->hba_index = 0; 11918 11919 /* link the wq onto the parent cq child list */ 11920 list_add_tail(&wq->list, &cq->child_list); 11921 out: 11922 mempool_free(mbox, phba->mbox_mem_pool); 11923 return status; 11924 } 11925 11926 /** 11927 * lpfc_rq_create - Create a Receive Queue on the HBA 11928 * @phba: HBA structure that indicates port to create a queue on. 11929 * @hrq: The queue structure to use to create the header receive queue. 11930 * @drq: The queue structure to use to create the data receive queue. 11931 * @cq: The completion queue to bind this work queue to. 11932 * 11933 * This function creates a receive buffer queue pair , as detailed in @hrq and 11934 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command 11935 * to the HBA. 11936 * 11937 * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq 11938 * struct is used to get the entry count that is necessary to determine the 11939 * number of pages to use for this queue. The @cq is used to indicate which 11940 * completion queue to bind received buffers that are posted to these queues to. 11941 * This function will send the RQ_CREATE mailbox command to the HBA to setup the 11942 * receive queue pair. This function is asynchronous and will wait for the 11943 * mailbox command to finish before continuing. 11944 * 11945 * On success this function will return a zero. If unable to allocate enough 11946 * memory this function will return -ENOMEM. If the queue create mailbox command 11947 * fails this function will return -ENXIO. 11948 **/ 11949 uint32_t 11950 lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, 11951 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype) 11952 { 11953 struct lpfc_mbx_rq_create *rq_create; 11954 struct lpfc_dmabuf *dmabuf; 11955 LPFC_MBOXQ_t *mbox; 11956 int rc, length, status = 0; 11957 uint32_t shdr_status, shdr_add_status; 11958 union lpfc_sli4_cfg_shdr *shdr; 11959 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 11960 11961 if (!phba->sli4_hba.pc_sli4_params.supported) 11962 hw_page_size = SLI4_PAGE_SIZE; 11963 11964 if (hrq->entry_count != drq->entry_count) 11965 return -EINVAL; 11966 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 11967 if (!mbox) 11968 return -ENOMEM; 11969 length = (sizeof(struct lpfc_mbx_rq_create) - 11970 sizeof(struct lpfc_sli4_cfg_mhdr)); 11971 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 11972 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, 11973 length, LPFC_SLI4_MBX_EMBED); 11974 rq_create = &mbox->u.mqe.un.rq_create; 11975 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; 11976 bf_set(lpfc_mbox_hdr_version, &shdr->request, 11977 phba->sli4_hba.pc_sli4_params.rqv); 11978 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { 11979 bf_set(lpfc_rq_context_rqe_count_1, 11980 &rq_create->u.request.context, 11981 hrq->entry_count); 11982 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE; 11983 bf_set(lpfc_rq_context_rqe_size, 11984 &rq_create->u.request.context, 11985 LPFC_RQE_SIZE_8); 11986 bf_set(lpfc_rq_context_page_size, 11987 &rq_create->u.request.context, 11988 (PAGE_SIZE/SLI4_PAGE_SIZE)); 11989 } else { 11990 switch (hrq->entry_count) { 11991 default: 11992 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11993 "2535 Unsupported RQ count. (%d)\n", 11994 hrq->entry_count); 11995 if (hrq->entry_count < 512) 11996 return -EINVAL; 11997 /* otherwise default to smallest count (drop through) */ 11998 case 512: 11999 bf_set(lpfc_rq_context_rqe_count, 12000 &rq_create->u.request.context, 12001 LPFC_RQ_RING_SIZE_512); 12002 break; 12003 case 1024: 12004 bf_set(lpfc_rq_context_rqe_count, 12005 &rq_create->u.request.context, 12006 LPFC_RQ_RING_SIZE_1024); 12007 break; 12008 case 2048: 12009 bf_set(lpfc_rq_context_rqe_count, 12010 &rq_create->u.request.context, 12011 LPFC_RQ_RING_SIZE_2048); 12012 break; 12013 case 4096: 12014 bf_set(lpfc_rq_context_rqe_count, 12015 &rq_create->u.request.context, 12016 LPFC_RQ_RING_SIZE_4096); 12017 break; 12018 } 12019 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, 12020 LPFC_HDR_BUF_SIZE); 12021 } 12022 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, 12023 cq->queue_id); 12024 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, 12025 hrq->page_count); 12026 list_for_each_entry(dmabuf, &hrq->page_list, list) { 12027 memset(dmabuf->virt, 0, hw_page_size); 12028 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 12029 putPaddrLow(dmabuf->phys); 12030 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 12031 putPaddrHigh(dmabuf->phys); 12032 } 12033 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 12034 /* The IOCTL status is embedded in the mailbox subheader. */ 12035 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 12036 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 12037 if (shdr_status || shdr_add_status || rc) { 12038 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12039 "2504 RQ_CREATE mailbox failed with " 12040 "status x%x add_status x%x, mbx status x%x\n", 12041 shdr_status, shdr_add_status, rc); 12042 status = -ENXIO; 12043 goto out; 12044 } 12045 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); 12046 if (hrq->queue_id == 0xFFFF) { 12047 status = -ENXIO; 12048 goto out; 12049 } 12050 hrq->type = LPFC_HRQ; 12051 hrq->assoc_qid = cq->queue_id; 12052 hrq->subtype = subtype; 12053 hrq->host_index = 0; 12054 hrq->hba_index = 0; 12055 12056 /* now create the data queue */ 12057 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 12058 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, 12059 length, LPFC_SLI4_MBX_EMBED); 12060 bf_set(lpfc_mbox_hdr_version, &shdr->request, 12061 phba->sli4_hba.pc_sli4_params.rqv); 12062 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { 12063 bf_set(lpfc_rq_context_rqe_count_1, 12064 &rq_create->u.request.context, hrq->entry_count); 12065 rq_create->u.request.context.buffer_size = LPFC_DATA_BUF_SIZE; 12066 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context, 12067 LPFC_RQE_SIZE_8); 12068 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context, 12069 (PAGE_SIZE/SLI4_PAGE_SIZE)); 12070 } else { 12071 switch (drq->entry_count) { 12072 default: 12073 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12074 "2536 Unsupported RQ count. (%d)\n", 12075 drq->entry_count); 12076 if (drq->entry_count < 512) 12077 return -EINVAL; 12078 /* otherwise default to smallest count (drop through) */ 12079 case 512: 12080 bf_set(lpfc_rq_context_rqe_count, 12081 &rq_create->u.request.context, 12082 LPFC_RQ_RING_SIZE_512); 12083 break; 12084 case 1024: 12085 bf_set(lpfc_rq_context_rqe_count, 12086 &rq_create->u.request.context, 12087 LPFC_RQ_RING_SIZE_1024); 12088 break; 12089 case 2048: 12090 bf_set(lpfc_rq_context_rqe_count, 12091 &rq_create->u.request.context, 12092 LPFC_RQ_RING_SIZE_2048); 12093 break; 12094 case 4096: 12095 bf_set(lpfc_rq_context_rqe_count, 12096 &rq_create->u.request.context, 12097 LPFC_RQ_RING_SIZE_4096); 12098 break; 12099 } 12100 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, 12101 LPFC_DATA_BUF_SIZE); 12102 } 12103 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, 12104 cq->queue_id); 12105 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, 12106 drq->page_count); 12107 list_for_each_entry(dmabuf, &drq->page_list, list) { 12108 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 12109 putPaddrLow(dmabuf->phys); 12110 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 12111 putPaddrHigh(dmabuf->phys); 12112 } 12113 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 12114 /* The IOCTL status is embedded in the mailbox subheader. */ 12115 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; 12116 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 12117 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 12118 if (shdr_status || shdr_add_status || rc) { 12119 status = -ENXIO; 12120 goto out; 12121 } 12122 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); 12123 if (drq->queue_id == 0xFFFF) { 12124 status = -ENXIO; 12125 goto out; 12126 } 12127 drq->type = LPFC_DRQ; 12128 drq->assoc_qid = cq->queue_id; 12129 drq->subtype = subtype; 12130 drq->host_index = 0; 12131 drq->hba_index = 0; 12132 12133 /* link the header and data RQs onto the parent cq child list */ 12134 list_add_tail(&hrq->list, &cq->child_list); 12135 list_add_tail(&drq->list, &cq->child_list); 12136 12137 out: 12138 mempool_free(mbox, phba->mbox_mem_pool); 12139 return status; 12140 } 12141 12142 /** 12143 * lpfc_eq_destroy - Destroy an event Queue on the HBA 12144 * @eq: The queue structure associated with the queue to destroy. 12145 * 12146 * This function destroys a queue, as detailed in @eq by sending an mailbox 12147 * command, specific to the type of queue, to the HBA. 12148 * 12149 * The @eq struct is used to get the queue ID of the queue to destroy. 12150 * 12151 * On success this function will return a zero. If the queue destroy mailbox 12152 * command fails this function will return -ENXIO. 12153 **/ 12154 uint32_t 12155 lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq) 12156 { 12157 LPFC_MBOXQ_t *mbox; 12158 int rc, length, status = 0; 12159 uint32_t shdr_status, shdr_add_status; 12160 union lpfc_sli4_cfg_shdr *shdr; 12161 12162 if (!eq) 12163 return -ENODEV; 12164 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL); 12165 if (!mbox) 12166 return -ENOMEM; 12167 length = (sizeof(struct lpfc_mbx_eq_destroy) - 12168 sizeof(struct lpfc_sli4_cfg_mhdr)); 12169 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 12170 LPFC_MBOX_OPCODE_EQ_DESTROY, 12171 length, LPFC_SLI4_MBX_EMBED); 12172 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request, 12173 eq->queue_id); 12174 mbox->vport = eq->phba->pport; 12175 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 12176 12177 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL); 12178 /* The IOCTL status is embedded in the mailbox subheader. */ 12179 shdr = (union lpfc_sli4_cfg_shdr *) 12180 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr; 12181 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 12182 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 12183 if (shdr_status || shdr_add_status || rc) { 12184 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12185 "2505 EQ_DESTROY mailbox failed with " 12186 "status x%x add_status x%x, mbx status x%x\n", 12187 shdr_status, shdr_add_status, rc); 12188 status = -ENXIO; 12189 } 12190 12191 /* Remove eq from any list */ 12192 list_del_init(&eq->list); 12193 mempool_free(mbox, eq->phba->mbox_mem_pool); 12194 return status; 12195 } 12196 12197 /** 12198 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA 12199 * @cq: The queue structure associated with the queue to destroy. 12200 * 12201 * This function destroys a queue, as detailed in @cq by sending an mailbox 12202 * command, specific to the type of queue, to the HBA. 12203 * 12204 * The @cq struct is used to get the queue ID of the queue to destroy. 12205 * 12206 * On success this function will return a zero. If the queue destroy mailbox 12207 * command fails this function will return -ENXIO. 12208 **/ 12209 uint32_t 12210 lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq) 12211 { 12212 LPFC_MBOXQ_t *mbox; 12213 int rc, length, status = 0; 12214 uint32_t shdr_status, shdr_add_status; 12215 union lpfc_sli4_cfg_shdr *shdr; 12216 12217 if (!cq) 12218 return -ENODEV; 12219 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL); 12220 if (!mbox) 12221 return -ENOMEM; 12222 length = (sizeof(struct lpfc_mbx_cq_destroy) - 12223 sizeof(struct lpfc_sli4_cfg_mhdr)); 12224 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 12225 LPFC_MBOX_OPCODE_CQ_DESTROY, 12226 length, LPFC_SLI4_MBX_EMBED); 12227 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request, 12228 cq->queue_id); 12229 mbox->vport = cq->phba->pport; 12230 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 12231 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL); 12232 /* The IOCTL status is embedded in the mailbox subheader. */ 12233 shdr = (union lpfc_sli4_cfg_shdr *) 12234 &mbox->u.mqe.un.wq_create.header.cfg_shdr; 12235 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 12236 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 12237 if (shdr_status || shdr_add_status || rc) { 12238 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12239 "2506 CQ_DESTROY mailbox failed with " 12240 "status x%x add_status x%x, mbx status x%x\n", 12241 shdr_status, shdr_add_status, rc); 12242 status = -ENXIO; 12243 } 12244 /* Remove cq from any list */ 12245 list_del_init(&cq->list); 12246 mempool_free(mbox, cq->phba->mbox_mem_pool); 12247 return status; 12248 } 12249 12250 /** 12251 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA 12252 * @qm: The queue structure associated with the queue to destroy. 12253 * 12254 * This function destroys a queue, as detailed in @mq by sending an mailbox 12255 * command, specific to the type of queue, to the HBA. 12256 * 12257 * The @mq struct is used to get the queue ID of the queue to destroy. 12258 * 12259 * On success this function will return a zero. If the queue destroy mailbox 12260 * command fails this function will return -ENXIO. 12261 **/ 12262 uint32_t 12263 lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq) 12264 { 12265 LPFC_MBOXQ_t *mbox; 12266 int rc, length, status = 0; 12267 uint32_t shdr_status, shdr_add_status; 12268 union lpfc_sli4_cfg_shdr *shdr; 12269 12270 if (!mq) 12271 return -ENODEV; 12272 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL); 12273 if (!mbox) 12274 return -ENOMEM; 12275 length = (sizeof(struct lpfc_mbx_mq_destroy) - 12276 sizeof(struct lpfc_sli4_cfg_mhdr)); 12277 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 12278 LPFC_MBOX_OPCODE_MQ_DESTROY, 12279 length, LPFC_SLI4_MBX_EMBED); 12280 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request, 12281 mq->queue_id); 12282 mbox->vport = mq->phba->pport; 12283 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 12284 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL); 12285 /* The IOCTL status is embedded in the mailbox subheader. */ 12286 shdr = (union lpfc_sli4_cfg_shdr *) 12287 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr; 12288 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 12289 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 12290 if (shdr_status || shdr_add_status || rc) { 12291 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12292 "2507 MQ_DESTROY mailbox failed with " 12293 "status x%x add_status x%x, mbx status x%x\n", 12294 shdr_status, shdr_add_status, rc); 12295 status = -ENXIO; 12296 } 12297 /* Remove mq from any list */ 12298 list_del_init(&mq->list); 12299 mempool_free(mbox, mq->phba->mbox_mem_pool); 12300 return status; 12301 } 12302 12303 /** 12304 * lpfc_wq_destroy - Destroy a Work Queue on the HBA 12305 * @wq: The queue structure associated with the queue to destroy. 12306 * 12307 * This function destroys a queue, as detailed in @wq by sending an mailbox 12308 * command, specific to the type of queue, to the HBA. 12309 * 12310 * The @wq struct is used to get the queue ID of the queue to destroy. 12311 * 12312 * On success this function will return a zero. If the queue destroy mailbox 12313 * command fails this function will return -ENXIO. 12314 **/ 12315 uint32_t 12316 lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq) 12317 { 12318 LPFC_MBOXQ_t *mbox; 12319 int rc, length, status = 0; 12320 uint32_t shdr_status, shdr_add_status; 12321 union lpfc_sli4_cfg_shdr *shdr; 12322 12323 if (!wq) 12324 return -ENODEV; 12325 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL); 12326 if (!mbox) 12327 return -ENOMEM; 12328 length = (sizeof(struct lpfc_mbx_wq_destroy) - 12329 sizeof(struct lpfc_sli4_cfg_mhdr)); 12330 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 12331 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY, 12332 length, LPFC_SLI4_MBX_EMBED); 12333 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request, 12334 wq->queue_id); 12335 mbox->vport = wq->phba->pport; 12336 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 12337 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL); 12338 shdr = (union lpfc_sli4_cfg_shdr *) 12339 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr; 12340 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 12341 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 12342 if (shdr_status || shdr_add_status || rc) { 12343 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12344 "2508 WQ_DESTROY mailbox failed with " 12345 "status x%x add_status x%x, mbx status x%x\n", 12346 shdr_status, shdr_add_status, rc); 12347 status = -ENXIO; 12348 } 12349 /* Remove wq from any list */ 12350 list_del_init(&wq->list); 12351 mempool_free(mbox, wq->phba->mbox_mem_pool); 12352 return status; 12353 } 12354 12355 /** 12356 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA 12357 * @rq: The queue structure associated with the queue to destroy. 12358 * 12359 * This function destroys a queue, as detailed in @rq by sending an mailbox 12360 * command, specific to the type of queue, to the HBA. 12361 * 12362 * The @rq struct is used to get the queue ID of the queue to destroy. 12363 * 12364 * On success this function will return a zero. If the queue destroy mailbox 12365 * command fails this function will return -ENXIO. 12366 **/ 12367 uint32_t 12368 lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq, 12369 struct lpfc_queue *drq) 12370 { 12371 LPFC_MBOXQ_t *mbox; 12372 int rc, length, status = 0; 12373 uint32_t shdr_status, shdr_add_status; 12374 union lpfc_sli4_cfg_shdr *shdr; 12375 12376 if (!hrq || !drq) 12377 return -ENODEV; 12378 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL); 12379 if (!mbox) 12380 return -ENOMEM; 12381 length = (sizeof(struct lpfc_mbx_rq_destroy) - 12382 sizeof(struct lpfc_sli4_cfg_mhdr)); 12383 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 12384 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY, 12385 length, LPFC_SLI4_MBX_EMBED); 12386 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request, 12387 hrq->queue_id); 12388 mbox->vport = hrq->phba->pport; 12389 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 12390 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL); 12391 /* The IOCTL status is embedded in the mailbox subheader. */ 12392 shdr = (union lpfc_sli4_cfg_shdr *) 12393 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr; 12394 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 12395 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 12396 if (shdr_status || shdr_add_status || rc) { 12397 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12398 "2509 RQ_DESTROY mailbox failed with " 12399 "status x%x add_status x%x, mbx status x%x\n", 12400 shdr_status, shdr_add_status, rc); 12401 if (rc != MBX_TIMEOUT) 12402 mempool_free(mbox, hrq->phba->mbox_mem_pool); 12403 return -ENXIO; 12404 } 12405 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request, 12406 drq->queue_id); 12407 rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL); 12408 shdr = (union lpfc_sli4_cfg_shdr *) 12409 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr; 12410 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 12411 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 12412 if (shdr_status || shdr_add_status || rc) { 12413 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12414 "2510 RQ_DESTROY mailbox failed with " 12415 "status x%x add_status x%x, mbx status x%x\n", 12416 shdr_status, shdr_add_status, rc); 12417 status = -ENXIO; 12418 } 12419 list_del_init(&hrq->list); 12420 list_del_init(&drq->list); 12421 mempool_free(mbox, hrq->phba->mbox_mem_pool); 12422 return status; 12423 } 12424 12425 /** 12426 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA 12427 * @phba: The virtual port for which this call being executed. 12428 * @pdma_phys_addr0: Physical address of the 1st SGL page. 12429 * @pdma_phys_addr1: Physical address of the 2nd SGL page. 12430 * @xritag: the xritag that ties this io to the SGL pages. 12431 * 12432 * This routine will post the sgl pages for the IO that has the xritag 12433 * that is in the iocbq structure. The xritag is assigned during iocbq 12434 * creation and persists for as long as the driver is loaded. 12435 * if the caller has fewer than 256 scatter gather segments to map then 12436 * pdma_phys_addr1 should be 0. 12437 * If the caller needs to map more than 256 scatter gather segment then 12438 * pdma_phys_addr1 should be a valid physical address. 12439 * physical address for SGLs must be 64 byte aligned. 12440 * If you are going to map 2 SGL's then the first one must have 256 entries 12441 * the second sgl can have between 1 and 256 entries. 12442 * 12443 * Return codes: 12444 * 0 - Success 12445 * -ENXIO, -ENOMEM - Failure 12446 **/ 12447 int 12448 lpfc_sli4_post_sgl(struct lpfc_hba *phba, 12449 dma_addr_t pdma_phys_addr0, 12450 dma_addr_t pdma_phys_addr1, 12451 uint16_t xritag) 12452 { 12453 struct lpfc_mbx_post_sgl_pages *post_sgl_pages; 12454 LPFC_MBOXQ_t *mbox; 12455 int rc; 12456 uint32_t shdr_status, shdr_add_status; 12457 uint32_t mbox_tmo; 12458 union lpfc_sli4_cfg_shdr *shdr; 12459 12460 if (xritag == NO_XRI) { 12461 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12462 "0364 Invalid param:\n"); 12463 return -EINVAL; 12464 } 12465 12466 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12467 if (!mbox) 12468 return -ENOMEM; 12469 12470 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 12471 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, 12472 sizeof(struct lpfc_mbx_post_sgl_pages) - 12473 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED); 12474 12475 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *) 12476 &mbox->u.mqe.un.post_sgl_pages; 12477 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag); 12478 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1); 12479 12480 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo = 12481 cpu_to_le32(putPaddrLow(pdma_phys_addr0)); 12482 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi = 12483 cpu_to_le32(putPaddrHigh(pdma_phys_addr0)); 12484 12485 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo = 12486 cpu_to_le32(putPaddrLow(pdma_phys_addr1)); 12487 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi = 12488 cpu_to_le32(putPaddrHigh(pdma_phys_addr1)); 12489 if (!phba->sli4_hba.intr_enable) 12490 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 12491 else { 12492 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); 12493 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 12494 } 12495 /* The IOCTL status is embedded in the mailbox subheader. */ 12496 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr; 12497 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 12498 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 12499 if (rc != MBX_TIMEOUT) 12500 mempool_free(mbox, phba->mbox_mem_pool); 12501 if (shdr_status || shdr_add_status || rc) { 12502 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12503 "2511 POST_SGL mailbox failed with " 12504 "status x%x add_status x%x, mbx status x%x\n", 12505 shdr_status, shdr_add_status, rc); 12506 rc = -ENXIO; 12507 } 12508 return 0; 12509 } 12510 12511 /** 12512 * lpfc_sli4_alloc_xri - Get an available rpi in the device's range 12513 * @phba: pointer to lpfc hba data structure. 12514 * 12515 * This routine is invoked to post rpi header templates to the 12516 * HBA consistent with the SLI-4 interface spec. This routine 12517 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to 12518 * SLI4_PAGE_SIZE modulo 64 rpi context headers. 12519 * 12520 * Returns 12521 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful 12522 * LPFC_RPI_ALLOC_ERROR if no rpis are available. 12523 **/ 12524 uint16_t 12525 lpfc_sli4_alloc_xri(struct lpfc_hba *phba) 12526 { 12527 unsigned long xri; 12528 12529 /* 12530 * Fetch the next logical xri. Because this index is logical, 12531 * the driver starts at 0 each time. 12532 */ 12533 spin_lock_irq(&phba->hbalock); 12534 xri = find_next_zero_bit(phba->sli4_hba.xri_bmask, 12535 phba->sli4_hba.max_cfg_param.max_xri, 0); 12536 if (xri >= phba->sli4_hba.max_cfg_param.max_xri) { 12537 spin_unlock_irq(&phba->hbalock); 12538 return NO_XRI; 12539 } else { 12540 set_bit(xri, phba->sli4_hba.xri_bmask); 12541 phba->sli4_hba.max_cfg_param.xri_used++; 12542 phba->sli4_hba.xri_count++; 12543 } 12544 12545 spin_unlock_irq(&phba->hbalock); 12546 return xri; 12547 } 12548 12549 /** 12550 * lpfc_sli4_free_xri - Release an xri for reuse. 12551 * @phba: pointer to lpfc hba data structure. 12552 * 12553 * This routine is invoked to release an xri to the pool of 12554 * available rpis maintained by the driver. 12555 **/ 12556 void 12557 __lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri) 12558 { 12559 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) { 12560 phba->sli4_hba.xri_count--; 12561 phba->sli4_hba.max_cfg_param.xri_used--; 12562 } 12563 } 12564 12565 /** 12566 * lpfc_sli4_free_xri - Release an xri for reuse. 12567 * @phba: pointer to lpfc hba data structure. 12568 * 12569 * This routine is invoked to release an xri to the pool of 12570 * available rpis maintained by the driver. 12571 **/ 12572 void 12573 lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri) 12574 { 12575 spin_lock_irq(&phba->hbalock); 12576 __lpfc_sli4_free_xri(phba, xri); 12577 spin_unlock_irq(&phba->hbalock); 12578 } 12579 12580 /** 12581 * lpfc_sli4_next_xritag - Get an xritag for the io 12582 * @phba: Pointer to HBA context object. 12583 * 12584 * This function gets an xritag for the iocb. If there is no unused xritag 12585 * it will return 0xffff. 12586 * The function returns the allocated xritag if successful, else returns zero. 12587 * Zero is not a valid xritag. 12588 * The caller is not required to hold any lock. 12589 **/ 12590 uint16_t 12591 lpfc_sli4_next_xritag(struct lpfc_hba *phba) 12592 { 12593 uint16_t xri_index; 12594 12595 xri_index = lpfc_sli4_alloc_xri(phba); 12596 if (xri_index != NO_XRI) 12597 return xri_index; 12598 12599 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12600 "2004 Failed to allocate XRI.last XRITAG is %d" 12601 " Max XRI is %d, Used XRI is %d\n", 12602 xri_index, 12603 phba->sli4_hba.max_cfg_param.max_xri, 12604 phba->sli4_hba.max_cfg_param.xri_used); 12605 return NO_XRI; 12606 } 12607 12608 /** 12609 * lpfc_sli4_post_els_sgl_list - post a block of ELS sgls to the port. 12610 * @phba: pointer to lpfc hba data structure. 12611 * 12612 * This routine is invoked to post a block of driver's sgl pages to the 12613 * HBA using non-embedded mailbox command. No Lock is held. This routine 12614 * is only called when the driver is loading and after all IO has been 12615 * stopped. 12616 **/ 12617 int 12618 lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba) 12619 { 12620 struct lpfc_sglq *sglq_entry; 12621 struct lpfc_mbx_post_uembed_sgl_page1 *sgl; 12622 struct sgl_page_pairs *sgl_pg_pairs; 12623 void *viraddr; 12624 LPFC_MBOXQ_t *mbox; 12625 uint32_t reqlen, alloclen, pg_pairs; 12626 uint32_t mbox_tmo; 12627 uint16_t xritag_start = 0, lxri = 0; 12628 int els_xri_cnt, rc = 0; 12629 uint32_t shdr_status, shdr_add_status; 12630 union lpfc_sli4_cfg_shdr *shdr; 12631 12632 /* The number of sgls to be posted */ 12633 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 12634 12635 reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) + 12636 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); 12637 if (reqlen > SLI4_PAGE_SIZE) { 12638 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 12639 "2559 Block sgl registration required DMA " 12640 "size (%d) great than a page\n", reqlen); 12641 return -ENOMEM; 12642 } 12643 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12644 if (!mbox) 12645 return -ENOMEM; 12646 12647 /* Allocate DMA memory and set up the non-embedded mailbox command */ 12648 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 12649 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen, 12650 LPFC_SLI4_MBX_NEMBED); 12651 12652 if (alloclen < reqlen) { 12653 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12654 "0285 Allocated DMA memory size (%d) is " 12655 "less than the requested DMA memory " 12656 "size (%d)\n", alloclen, reqlen); 12657 lpfc_sli4_mbox_cmd_free(phba, mbox); 12658 return -ENOMEM; 12659 } 12660 /* Set up the SGL pages in the non-embedded DMA pages */ 12661 viraddr = mbox->sge_array->addr[0]; 12662 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; 12663 sgl_pg_pairs = &sgl->sgl_pg_pairs; 12664 12665 for (pg_pairs = 0; pg_pairs < els_xri_cnt; pg_pairs++) { 12666 sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[pg_pairs]; 12667 12668 /* 12669 * Assign the sglq a physical xri only if the driver has not 12670 * initialized those resources. A port reset only needs 12671 * the sglq's posted. 12672 */ 12673 if (bf_get(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags) != 12674 LPFC_XRI_RSRC_RDY) { 12675 lxri = lpfc_sli4_next_xritag(phba); 12676 if (lxri == NO_XRI) { 12677 lpfc_sli4_mbox_cmd_free(phba, mbox); 12678 return -ENOMEM; 12679 } 12680 sglq_entry->sli4_lxritag = lxri; 12681 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 12682 } 12683 12684 /* Set up the sge entry */ 12685 sgl_pg_pairs->sgl_pg0_addr_lo = 12686 cpu_to_le32(putPaddrLow(sglq_entry->phys)); 12687 sgl_pg_pairs->sgl_pg0_addr_hi = 12688 cpu_to_le32(putPaddrHigh(sglq_entry->phys)); 12689 sgl_pg_pairs->sgl_pg1_addr_lo = 12690 cpu_to_le32(putPaddrLow(0)); 12691 sgl_pg_pairs->sgl_pg1_addr_hi = 12692 cpu_to_le32(putPaddrHigh(0)); 12693 12694 /* Keep the first xritag on the list */ 12695 if (pg_pairs == 0) 12696 xritag_start = sglq_entry->sli4_xritag; 12697 sgl_pg_pairs++; 12698 } 12699 12700 /* Complete initialization and perform endian conversion. */ 12701 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); 12702 bf_set(lpfc_post_sgl_pages_xricnt, sgl, els_xri_cnt); 12703 sgl->word0 = cpu_to_le32(sgl->word0); 12704 if (!phba->sli4_hba.intr_enable) 12705 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 12706 else { 12707 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); 12708 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 12709 } 12710 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; 12711 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 12712 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 12713 if (rc != MBX_TIMEOUT) 12714 lpfc_sli4_mbox_cmd_free(phba, mbox); 12715 if (shdr_status || shdr_add_status || rc) { 12716 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12717 "2513 POST_SGL_BLOCK mailbox command failed " 12718 "status x%x add_status x%x mbx status x%x\n", 12719 shdr_status, shdr_add_status, rc); 12720 rc = -ENXIO; 12721 } 12722 12723 if (rc == 0) 12724 bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags, 12725 LPFC_XRI_RSRC_RDY); 12726 return rc; 12727 } 12728 12729 /** 12730 * lpfc_sli4_post_els_sgl_list_ext - post a block of ELS sgls to the port. 12731 * @phba: pointer to lpfc hba data structure. 12732 * 12733 * This routine is invoked to post a block of driver's sgl pages to the 12734 * HBA using non-embedded mailbox command. No Lock is held. This routine 12735 * is only called when the driver is loading and after all IO has been 12736 * stopped. 12737 **/ 12738 int 12739 lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba) 12740 { 12741 struct lpfc_sglq *sglq_entry; 12742 struct lpfc_mbx_post_uembed_sgl_page1 *sgl; 12743 struct sgl_page_pairs *sgl_pg_pairs; 12744 void *viraddr; 12745 LPFC_MBOXQ_t *mbox; 12746 uint32_t reqlen, alloclen, index; 12747 uint32_t mbox_tmo; 12748 uint16_t rsrc_start, rsrc_size, els_xri_cnt; 12749 uint16_t xritag_start = 0, lxri = 0; 12750 struct lpfc_rsrc_blks *rsrc_blk; 12751 int cnt, ttl_cnt, rc = 0; 12752 int loop_cnt; 12753 uint32_t shdr_status, shdr_add_status; 12754 union lpfc_sli4_cfg_shdr *shdr; 12755 12756 /* The number of sgls to be posted */ 12757 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 12758 12759 reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) + 12760 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); 12761 if (reqlen > SLI4_PAGE_SIZE) { 12762 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 12763 "2989 Block sgl registration required DMA " 12764 "size (%d) great than a page\n", reqlen); 12765 return -ENOMEM; 12766 } 12767 12768 cnt = 0; 12769 ttl_cnt = 0; 12770 list_for_each_entry(rsrc_blk, &phba->sli4_hba.lpfc_xri_blk_list, 12771 list) { 12772 rsrc_start = rsrc_blk->rsrc_start; 12773 rsrc_size = rsrc_blk->rsrc_size; 12774 12775 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12776 "3014 Working ELS Extent start %d, cnt %d\n", 12777 rsrc_start, rsrc_size); 12778 12779 loop_cnt = min(els_xri_cnt, rsrc_size); 12780 if (ttl_cnt + loop_cnt >= els_xri_cnt) { 12781 loop_cnt = els_xri_cnt - ttl_cnt; 12782 ttl_cnt = els_xri_cnt; 12783 } 12784 12785 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12786 if (!mbox) 12787 return -ENOMEM; 12788 /* 12789 * Allocate DMA memory and set up the non-embedded mailbox 12790 * command. 12791 */ 12792 alloclen = lpfc_sli4_config(phba, mbox, 12793 LPFC_MBOX_SUBSYSTEM_FCOE, 12794 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, 12795 reqlen, LPFC_SLI4_MBX_NEMBED); 12796 if (alloclen < reqlen) { 12797 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12798 "2987 Allocated DMA memory size (%d) " 12799 "is less than the requested DMA memory " 12800 "size (%d)\n", alloclen, reqlen); 12801 lpfc_sli4_mbox_cmd_free(phba, mbox); 12802 return -ENOMEM; 12803 } 12804 12805 /* Set up the SGL pages in the non-embedded DMA pages */ 12806 viraddr = mbox->sge_array->addr[0]; 12807 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; 12808 sgl_pg_pairs = &sgl->sgl_pg_pairs; 12809 12810 /* 12811 * The starting resource may not begin at zero. Control 12812 * the loop variants via the block resource parameters, 12813 * but handle the sge pointers with a zero-based index 12814 * that doesn't get reset per loop pass. 12815 */ 12816 for (index = rsrc_start; 12817 index < rsrc_start + loop_cnt; 12818 index++) { 12819 sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[cnt]; 12820 12821 /* 12822 * Assign the sglq a physical xri only if the driver 12823 * has not initialized those resources. A port reset 12824 * only needs the sglq's posted. 12825 */ 12826 if (bf_get(lpfc_xri_rsrc_rdy, 12827 &phba->sli4_hba.sli4_flags) != 12828 LPFC_XRI_RSRC_RDY) { 12829 lxri = lpfc_sli4_next_xritag(phba); 12830 if (lxri == NO_XRI) { 12831 lpfc_sli4_mbox_cmd_free(phba, mbox); 12832 rc = -ENOMEM; 12833 goto err_exit; 12834 } 12835 sglq_entry->sli4_lxritag = lxri; 12836 sglq_entry->sli4_xritag = 12837 phba->sli4_hba.xri_ids[lxri]; 12838 } 12839 12840 /* Set up the sge entry */ 12841 sgl_pg_pairs->sgl_pg0_addr_lo = 12842 cpu_to_le32(putPaddrLow(sglq_entry->phys)); 12843 sgl_pg_pairs->sgl_pg0_addr_hi = 12844 cpu_to_le32(putPaddrHigh(sglq_entry->phys)); 12845 sgl_pg_pairs->sgl_pg1_addr_lo = 12846 cpu_to_le32(putPaddrLow(0)); 12847 sgl_pg_pairs->sgl_pg1_addr_hi = 12848 cpu_to_le32(putPaddrHigh(0)); 12849 12850 /* Track the starting physical XRI for the mailbox. */ 12851 if (index == rsrc_start) 12852 xritag_start = sglq_entry->sli4_xritag; 12853 sgl_pg_pairs++; 12854 cnt++; 12855 } 12856 12857 /* Complete initialization and perform endian conversion. */ 12858 rsrc_blk->rsrc_used += loop_cnt; 12859 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); 12860 bf_set(lpfc_post_sgl_pages_xricnt, sgl, loop_cnt); 12861 sgl->word0 = cpu_to_le32(sgl->word0); 12862 12863 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12864 "3015 Post ELS Extent SGL, start %d, " 12865 "cnt %d, used %d\n", 12866 xritag_start, loop_cnt, rsrc_blk->rsrc_used); 12867 if (!phba->sli4_hba.intr_enable) 12868 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 12869 else { 12870 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); 12871 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 12872 } 12873 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; 12874 shdr_status = bf_get(lpfc_mbox_hdr_status, 12875 &shdr->response); 12876 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 12877 &shdr->response); 12878 if (rc != MBX_TIMEOUT) 12879 lpfc_sli4_mbox_cmd_free(phba, mbox); 12880 if (shdr_status || shdr_add_status || rc) { 12881 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12882 "2988 POST_SGL_BLOCK mailbox " 12883 "command failed status x%x " 12884 "add_status x%x mbx status x%x\n", 12885 shdr_status, shdr_add_status, rc); 12886 rc = -ENXIO; 12887 goto err_exit; 12888 } 12889 if (ttl_cnt >= els_xri_cnt) 12890 break; 12891 } 12892 12893 err_exit: 12894 if (rc == 0) 12895 bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags, 12896 LPFC_XRI_RSRC_RDY); 12897 return rc; 12898 } 12899 12900 /** 12901 * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware 12902 * @phba: pointer to lpfc hba data structure. 12903 * @sblist: pointer to scsi buffer list. 12904 * @count: number of scsi buffers on the list. 12905 * 12906 * This routine is invoked to post a block of @count scsi sgl pages from a 12907 * SCSI buffer list @sblist to the HBA using non-embedded mailbox command. 12908 * No Lock is held. 12909 * 12910 **/ 12911 int 12912 lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist, 12913 int cnt) 12914 { 12915 struct lpfc_scsi_buf *psb; 12916 struct lpfc_mbx_post_uembed_sgl_page1 *sgl; 12917 struct sgl_page_pairs *sgl_pg_pairs; 12918 void *viraddr; 12919 LPFC_MBOXQ_t *mbox; 12920 uint32_t reqlen, alloclen, pg_pairs; 12921 uint32_t mbox_tmo; 12922 uint16_t xritag_start = 0; 12923 int rc = 0; 12924 uint32_t shdr_status, shdr_add_status; 12925 dma_addr_t pdma_phys_bpl1; 12926 union lpfc_sli4_cfg_shdr *shdr; 12927 12928 /* Calculate the requested length of the dma memory */ 12929 reqlen = cnt * sizeof(struct sgl_page_pairs) + 12930 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); 12931 if (reqlen > SLI4_PAGE_SIZE) { 12932 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 12933 "0217 Block sgl registration required DMA " 12934 "size (%d) great than a page\n", reqlen); 12935 return -ENOMEM; 12936 } 12937 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12938 if (!mbox) { 12939 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12940 "0283 Failed to allocate mbox cmd memory\n"); 12941 return -ENOMEM; 12942 } 12943 12944 /* Allocate DMA memory and set up the non-embedded mailbox command */ 12945 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 12946 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen, 12947 LPFC_SLI4_MBX_NEMBED); 12948 12949 if (alloclen < reqlen) { 12950 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12951 "2561 Allocated DMA memory size (%d) is " 12952 "less than the requested DMA memory " 12953 "size (%d)\n", alloclen, reqlen); 12954 lpfc_sli4_mbox_cmd_free(phba, mbox); 12955 return -ENOMEM; 12956 } 12957 12958 /* Get the first SGE entry from the non-embedded DMA memory */ 12959 viraddr = mbox->sge_array->addr[0]; 12960 12961 /* Set up the SGL pages in the non-embedded DMA pages */ 12962 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; 12963 sgl_pg_pairs = &sgl->sgl_pg_pairs; 12964 12965 pg_pairs = 0; 12966 list_for_each_entry(psb, sblist, list) { 12967 /* Set up the sge entry */ 12968 sgl_pg_pairs->sgl_pg0_addr_lo = 12969 cpu_to_le32(putPaddrLow(psb->dma_phys_bpl)); 12970 sgl_pg_pairs->sgl_pg0_addr_hi = 12971 cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl)); 12972 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) 12973 pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE; 12974 else 12975 pdma_phys_bpl1 = 0; 12976 sgl_pg_pairs->sgl_pg1_addr_lo = 12977 cpu_to_le32(putPaddrLow(pdma_phys_bpl1)); 12978 sgl_pg_pairs->sgl_pg1_addr_hi = 12979 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1)); 12980 /* Keep the first xritag on the list */ 12981 if (pg_pairs == 0) 12982 xritag_start = psb->cur_iocbq.sli4_xritag; 12983 sgl_pg_pairs++; 12984 pg_pairs++; 12985 } 12986 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); 12987 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs); 12988 /* Perform endian conversion if necessary */ 12989 sgl->word0 = cpu_to_le32(sgl->word0); 12990 12991 if (!phba->sli4_hba.intr_enable) 12992 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 12993 else { 12994 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); 12995 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 12996 } 12997 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; 12998 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 12999 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 13000 if (rc != MBX_TIMEOUT) 13001 lpfc_sli4_mbox_cmd_free(phba, mbox); 13002 if (shdr_status || shdr_add_status || rc) { 13003 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13004 "2564 POST_SGL_BLOCK mailbox command failed " 13005 "status x%x add_status x%x mbx status x%x\n", 13006 shdr_status, shdr_add_status, rc); 13007 rc = -ENXIO; 13008 } 13009 return rc; 13010 } 13011 13012 /** 13013 * lpfc_sli4_post_scsi_sgl_blk_ext - post a block of scsi sgls to the port. 13014 * @phba: pointer to lpfc hba data structure. 13015 * @sblist: pointer to scsi buffer list. 13016 * @count: number of scsi buffers on the list. 13017 * 13018 * This routine is invoked to post a block of @count scsi sgl pages from a 13019 * SCSI buffer list @sblist to the HBA using non-embedded mailbox command. 13020 * No Lock is held. 13021 * 13022 **/ 13023 int 13024 lpfc_sli4_post_scsi_sgl_blk_ext(struct lpfc_hba *phba, struct list_head *sblist, 13025 int cnt) 13026 { 13027 struct lpfc_scsi_buf *psb = NULL; 13028 struct lpfc_mbx_post_uembed_sgl_page1 *sgl; 13029 struct sgl_page_pairs *sgl_pg_pairs; 13030 void *viraddr; 13031 LPFC_MBOXQ_t *mbox; 13032 uint32_t reqlen, alloclen, pg_pairs; 13033 uint32_t mbox_tmo; 13034 uint16_t xri_start = 0, scsi_xri_start; 13035 uint16_t rsrc_range; 13036 int rc = 0, avail_cnt; 13037 uint32_t shdr_status, shdr_add_status; 13038 dma_addr_t pdma_phys_bpl1; 13039 union lpfc_sli4_cfg_shdr *shdr; 13040 struct lpfc_rsrc_blks *rsrc_blk; 13041 uint32_t xri_cnt = 0; 13042 13043 /* Calculate the total requested length of the dma memory */ 13044 reqlen = cnt * sizeof(struct sgl_page_pairs) + 13045 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); 13046 if (reqlen > SLI4_PAGE_SIZE) { 13047 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 13048 "2932 Block sgl registration required DMA " 13049 "size (%d) great than a page\n", reqlen); 13050 return -ENOMEM; 13051 } 13052 13053 /* 13054 * The use of extents requires the driver to post the sgl headers 13055 * in multiple postings to meet the contiguous resource assignment. 13056 */ 13057 psb = list_prepare_entry(psb, sblist, list); 13058 scsi_xri_start = phba->sli4_hba.scsi_xri_start; 13059 list_for_each_entry(rsrc_blk, &phba->sli4_hba.lpfc_xri_blk_list, 13060 list) { 13061 rsrc_range = rsrc_blk->rsrc_start + rsrc_blk->rsrc_size; 13062 if (rsrc_range < scsi_xri_start) 13063 continue; 13064 else if (rsrc_blk->rsrc_used >= rsrc_blk->rsrc_size) 13065 continue; 13066 else 13067 avail_cnt = rsrc_blk->rsrc_size - rsrc_blk->rsrc_used; 13068 13069 reqlen = (avail_cnt * sizeof(struct sgl_page_pairs)) + 13070 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); 13071 /* 13072 * Allocate DMA memory and set up the non-embedded mailbox 13073 * command. The mbox is used to post an SGL page per loop 13074 * but the DMA memory has a use-once semantic so the mailbox 13075 * is used and freed per loop pass. 13076 */ 13077 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 13078 if (!mbox) { 13079 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13080 "2933 Failed to allocate mbox cmd " 13081 "memory\n"); 13082 return -ENOMEM; 13083 } 13084 alloclen = lpfc_sli4_config(phba, mbox, 13085 LPFC_MBOX_SUBSYSTEM_FCOE, 13086 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, 13087 reqlen, 13088 LPFC_SLI4_MBX_NEMBED); 13089 if (alloclen < reqlen) { 13090 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13091 "2934 Allocated DMA memory size (%d) " 13092 "is less than the requested DMA memory " 13093 "size (%d)\n", alloclen, reqlen); 13094 lpfc_sli4_mbox_cmd_free(phba, mbox); 13095 return -ENOMEM; 13096 } 13097 13098 /* Get the first SGE entry from the non-embedded DMA memory */ 13099 viraddr = mbox->sge_array->addr[0]; 13100 13101 /* Set up the SGL pages in the non-embedded DMA pages */ 13102 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; 13103 sgl_pg_pairs = &sgl->sgl_pg_pairs; 13104 13105 /* pg_pairs tracks posted SGEs per loop iteration. */ 13106 pg_pairs = 0; 13107 list_for_each_entry_continue(psb, sblist, list) { 13108 /* Set up the sge entry */ 13109 sgl_pg_pairs->sgl_pg0_addr_lo = 13110 cpu_to_le32(putPaddrLow(psb->dma_phys_bpl)); 13111 sgl_pg_pairs->sgl_pg0_addr_hi = 13112 cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl)); 13113 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) 13114 pdma_phys_bpl1 = psb->dma_phys_bpl + 13115 SGL_PAGE_SIZE; 13116 else 13117 pdma_phys_bpl1 = 0; 13118 sgl_pg_pairs->sgl_pg1_addr_lo = 13119 cpu_to_le32(putPaddrLow(pdma_phys_bpl1)); 13120 sgl_pg_pairs->sgl_pg1_addr_hi = 13121 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1)); 13122 /* Keep the first xri for this extent. */ 13123 if (pg_pairs == 0) 13124 xri_start = psb->cur_iocbq.sli4_xritag; 13125 sgl_pg_pairs++; 13126 pg_pairs++; 13127 xri_cnt++; 13128 13129 /* 13130 * Track two exit conditions - the loop has constructed 13131 * all of the caller's SGE pairs or all available 13132 * resource IDs in this extent are consumed. 13133 */ 13134 if ((xri_cnt == cnt) || (pg_pairs >= avail_cnt)) 13135 break; 13136 } 13137 rsrc_blk->rsrc_used += pg_pairs; 13138 bf_set(lpfc_post_sgl_pages_xri, sgl, xri_start); 13139 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs); 13140 13141 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 13142 "3016 Post SCSI Extent SGL, start %d, cnt %d " 13143 "blk use %d\n", 13144 xri_start, pg_pairs, rsrc_blk->rsrc_used); 13145 /* Perform endian conversion if necessary */ 13146 sgl->word0 = cpu_to_le32(sgl->word0); 13147 if (!phba->sli4_hba.intr_enable) 13148 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 13149 else { 13150 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); 13151 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 13152 } 13153 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; 13154 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 13155 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 13156 &shdr->response); 13157 if (rc != MBX_TIMEOUT) 13158 lpfc_sli4_mbox_cmd_free(phba, mbox); 13159 if (shdr_status || shdr_add_status || rc) { 13160 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13161 "2935 POST_SGL_BLOCK mailbox command " 13162 "failed status x%x add_status x%x " 13163 "mbx status x%x\n", 13164 shdr_status, shdr_add_status, rc); 13165 return -ENXIO; 13166 } 13167 13168 /* Post only what is requested. */ 13169 if (xri_cnt >= cnt) 13170 break; 13171 } 13172 return rc; 13173 } 13174 13175 /** 13176 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle 13177 * @phba: pointer to lpfc_hba struct that the frame was received on 13178 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 13179 * 13180 * This function checks the fields in the @fc_hdr to see if the FC frame is a 13181 * valid type of frame that the LPFC driver will handle. This function will 13182 * return a zero if the frame is a valid frame or a non zero value when the 13183 * frame does not pass the check. 13184 **/ 13185 static int 13186 lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) 13187 { 13188 /* make rctl_names static to save stack space */ 13189 static char *rctl_names[] = FC_RCTL_NAMES_INIT; 13190 char *type_names[] = FC_TYPE_NAMES_INIT; 13191 struct fc_vft_header *fc_vft_hdr; 13192 uint32_t *header = (uint32_t *) fc_hdr; 13193 13194 switch (fc_hdr->fh_r_ctl) { 13195 case FC_RCTL_DD_UNCAT: /* uncategorized information */ 13196 case FC_RCTL_DD_SOL_DATA: /* solicited data */ 13197 case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */ 13198 case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */ 13199 case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */ 13200 case FC_RCTL_DD_DATA_DESC: /* data descriptor */ 13201 case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */ 13202 case FC_RCTL_DD_CMD_STATUS: /* command status */ 13203 case FC_RCTL_ELS_REQ: /* extended link services request */ 13204 case FC_RCTL_ELS_REP: /* extended link services reply */ 13205 case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */ 13206 case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */ 13207 case FC_RCTL_BA_NOP: /* basic link service NOP */ 13208 case FC_RCTL_BA_ABTS: /* basic link service abort */ 13209 case FC_RCTL_BA_RMC: /* remove connection */ 13210 case FC_RCTL_BA_ACC: /* basic accept */ 13211 case FC_RCTL_BA_RJT: /* basic reject */ 13212 case FC_RCTL_BA_PRMT: 13213 case FC_RCTL_ACK_1: /* acknowledge_1 */ 13214 case FC_RCTL_ACK_0: /* acknowledge_0 */ 13215 case FC_RCTL_P_RJT: /* port reject */ 13216 case FC_RCTL_F_RJT: /* fabric reject */ 13217 case FC_RCTL_P_BSY: /* port busy */ 13218 case FC_RCTL_F_BSY: /* fabric busy to data frame */ 13219 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */ 13220 case FC_RCTL_LCR: /* link credit reset */ 13221 case FC_RCTL_END: /* end */ 13222 break; 13223 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */ 13224 fc_vft_hdr = (struct fc_vft_header *)fc_hdr; 13225 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1]; 13226 return lpfc_fc_frame_check(phba, fc_hdr); 13227 default: 13228 goto drop; 13229 } 13230 switch (fc_hdr->fh_type) { 13231 case FC_TYPE_BLS: 13232 case FC_TYPE_ELS: 13233 case FC_TYPE_FCP: 13234 case FC_TYPE_CT: 13235 break; 13236 case FC_TYPE_IP: 13237 case FC_TYPE_ILS: 13238 default: 13239 goto drop; 13240 } 13241 13242 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 13243 "2538 Received frame rctl:%s type:%s " 13244 "Frame Data:%08x %08x %08x %08x %08x %08x\n", 13245 rctl_names[fc_hdr->fh_r_ctl], 13246 type_names[fc_hdr->fh_type], 13247 be32_to_cpu(header[0]), be32_to_cpu(header[1]), 13248 be32_to_cpu(header[2]), be32_to_cpu(header[3]), 13249 be32_to_cpu(header[4]), be32_to_cpu(header[5])); 13250 return 0; 13251 drop: 13252 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, 13253 "2539 Dropped frame rctl:%s type:%s\n", 13254 rctl_names[fc_hdr->fh_r_ctl], 13255 type_names[fc_hdr->fh_type]); 13256 return 1; 13257 } 13258 13259 /** 13260 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame 13261 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 13262 * 13263 * This function processes the FC header to retrieve the VFI from the VF 13264 * header, if one exists. This function will return the VFI if one exists 13265 * or 0 if no VSAN Header exists. 13266 **/ 13267 static uint32_t 13268 lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr) 13269 { 13270 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr; 13271 13272 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH) 13273 return 0; 13274 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr); 13275 } 13276 13277 /** 13278 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to 13279 * @phba: Pointer to the HBA structure to search for the vport on 13280 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 13281 * @fcfi: The FC Fabric ID that the frame came from 13282 * 13283 * This function searches the @phba for a vport that matches the content of the 13284 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the 13285 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function 13286 * returns the matching vport pointer or NULL if unable to match frame to a 13287 * vport. 13288 **/ 13289 static struct lpfc_vport * 13290 lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr, 13291 uint16_t fcfi) 13292 { 13293 struct lpfc_vport **vports; 13294 struct lpfc_vport *vport = NULL; 13295 int i; 13296 uint32_t did = (fc_hdr->fh_d_id[0] << 16 | 13297 fc_hdr->fh_d_id[1] << 8 | 13298 fc_hdr->fh_d_id[2]); 13299 13300 vports = lpfc_create_vport_work_array(phba); 13301 if (vports != NULL) 13302 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 13303 if (phba->fcf.fcfi == fcfi && 13304 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) && 13305 vports[i]->fc_myDID == did) { 13306 vport = vports[i]; 13307 break; 13308 } 13309 } 13310 lpfc_destroy_vport_work_array(phba, vports); 13311 return vport; 13312 } 13313 13314 /** 13315 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp 13316 * @vport: The vport to work on. 13317 * 13318 * This function updates the receive sequence time stamp for this vport. The 13319 * receive sequence time stamp indicates the time that the last frame of the 13320 * the sequence that has been idle for the longest amount of time was received. 13321 * the driver uses this time stamp to indicate if any received sequences have 13322 * timed out. 13323 **/ 13324 void 13325 lpfc_update_rcv_time_stamp(struct lpfc_vport *vport) 13326 { 13327 struct lpfc_dmabuf *h_buf; 13328 struct hbq_dmabuf *dmabuf = NULL; 13329 13330 /* get the oldest sequence on the rcv list */ 13331 h_buf = list_get_first(&vport->rcv_buffer_list, 13332 struct lpfc_dmabuf, list); 13333 if (!h_buf) 13334 return; 13335 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 13336 vport->rcv_buffer_time_stamp = dmabuf->time_stamp; 13337 } 13338 13339 /** 13340 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences. 13341 * @vport: The vport that the received sequences were sent to. 13342 * 13343 * This function cleans up all outstanding received sequences. This is called 13344 * by the driver when a link event or user action invalidates all the received 13345 * sequences. 13346 **/ 13347 void 13348 lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport) 13349 { 13350 struct lpfc_dmabuf *h_buf, *hnext; 13351 struct lpfc_dmabuf *d_buf, *dnext; 13352 struct hbq_dmabuf *dmabuf = NULL; 13353 13354 /* start with the oldest sequence on the rcv list */ 13355 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) { 13356 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 13357 list_del_init(&dmabuf->hbuf.list); 13358 list_for_each_entry_safe(d_buf, dnext, 13359 &dmabuf->dbuf.list, list) { 13360 list_del_init(&d_buf->list); 13361 lpfc_in_buf_free(vport->phba, d_buf); 13362 } 13363 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf); 13364 } 13365 } 13366 13367 /** 13368 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences. 13369 * @vport: The vport that the received sequences were sent to. 13370 * 13371 * This function determines whether any received sequences have timed out by 13372 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp 13373 * indicates that there is at least one timed out sequence this routine will 13374 * go through the received sequences one at a time from most inactive to most 13375 * active to determine which ones need to be cleaned up. Once it has determined 13376 * that a sequence needs to be cleaned up it will simply free up the resources 13377 * without sending an abort. 13378 **/ 13379 void 13380 lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport) 13381 { 13382 struct lpfc_dmabuf *h_buf, *hnext; 13383 struct lpfc_dmabuf *d_buf, *dnext; 13384 struct hbq_dmabuf *dmabuf = NULL; 13385 unsigned long timeout; 13386 int abort_count = 0; 13387 13388 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) + 13389 vport->rcv_buffer_time_stamp); 13390 if (list_empty(&vport->rcv_buffer_list) || 13391 time_before(jiffies, timeout)) 13392 return; 13393 /* start with the oldest sequence on the rcv list */ 13394 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) { 13395 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 13396 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) + 13397 dmabuf->time_stamp); 13398 if (time_before(jiffies, timeout)) 13399 break; 13400 abort_count++; 13401 list_del_init(&dmabuf->hbuf.list); 13402 list_for_each_entry_safe(d_buf, dnext, 13403 &dmabuf->dbuf.list, list) { 13404 list_del_init(&d_buf->list); 13405 lpfc_in_buf_free(vport->phba, d_buf); 13406 } 13407 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf); 13408 } 13409 if (abort_count) 13410 lpfc_update_rcv_time_stamp(vport); 13411 } 13412 13413 /** 13414 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences 13415 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame 13416 * 13417 * This function searches through the existing incomplete sequences that have 13418 * been sent to this @vport. If the frame matches one of the incomplete 13419 * sequences then the dbuf in the @dmabuf is added to the list of frames that 13420 * make up that sequence. If no sequence is found that matches this frame then 13421 * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list 13422 * This function returns a pointer to the first dmabuf in the sequence list that 13423 * the frame was linked to. 13424 **/ 13425 static struct hbq_dmabuf * 13426 lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf) 13427 { 13428 struct fc_frame_header *new_hdr; 13429 struct fc_frame_header *temp_hdr; 13430 struct lpfc_dmabuf *d_buf; 13431 struct lpfc_dmabuf *h_buf; 13432 struct hbq_dmabuf *seq_dmabuf = NULL; 13433 struct hbq_dmabuf *temp_dmabuf = NULL; 13434 13435 INIT_LIST_HEAD(&dmabuf->dbuf.list); 13436 dmabuf->time_stamp = jiffies; 13437 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 13438 /* Use the hdr_buf to find the sequence that this frame belongs to */ 13439 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) { 13440 temp_hdr = (struct fc_frame_header *)h_buf->virt; 13441 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) || 13442 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) || 13443 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3))) 13444 continue; 13445 /* found a pending sequence that matches this frame */ 13446 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 13447 break; 13448 } 13449 if (!seq_dmabuf) { 13450 /* 13451 * This indicates first frame received for this sequence. 13452 * Queue the buffer on the vport's rcv_buffer_list. 13453 */ 13454 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list); 13455 lpfc_update_rcv_time_stamp(vport); 13456 return dmabuf; 13457 } 13458 temp_hdr = seq_dmabuf->hbuf.virt; 13459 if (be16_to_cpu(new_hdr->fh_seq_cnt) < 13460 be16_to_cpu(temp_hdr->fh_seq_cnt)) { 13461 list_del_init(&seq_dmabuf->hbuf.list); 13462 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list); 13463 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list); 13464 lpfc_update_rcv_time_stamp(vport); 13465 return dmabuf; 13466 } 13467 /* move this sequence to the tail to indicate a young sequence */ 13468 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list); 13469 seq_dmabuf->time_stamp = jiffies; 13470 lpfc_update_rcv_time_stamp(vport); 13471 if (list_empty(&seq_dmabuf->dbuf.list)) { 13472 temp_hdr = dmabuf->hbuf.virt; 13473 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list); 13474 return seq_dmabuf; 13475 } 13476 /* find the correct place in the sequence to insert this frame */ 13477 list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) { 13478 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); 13479 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt; 13480 /* 13481 * If the frame's sequence count is greater than the frame on 13482 * the list then insert the frame right after this frame 13483 */ 13484 if (be16_to_cpu(new_hdr->fh_seq_cnt) > 13485 be16_to_cpu(temp_hdr->fh_seq_cnt)) { 13486 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list); 13487 return seq_dmabuf; 13488 } 13489 } 13490 return NULL; 13491 } 13492 13493 /** 13494 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence 13495 * @vport: pointer to a vitural port 13496 * @dmabuf: pointer to a dmabuf that describes the FC sequence 13497 * 13498 * This function tries to abort from the partially assembed sequence, described 13499 * by the information from basic abbort @dmabuf. It checks to see whether such 13500 * partially assembled sequence held by the driver. If so, it shall free up all 13501 * the frames from the partially assembled sequence. 13502 * 13503 * Return 13504 * true -- if there is matching partially assembled sequence present and all 13505 * the frames freed with the sequence; 13506 * false -- if there is no matching partially assembled sequence present so 13507 * nothing got aborted in the lower layer driver 13508 **/ 13509 static bool 13510 lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport, 13511 struct hbq_dmabuf *dmabuf) 13512 { 13513 struct fc_frame_header *new_hdr; 13514 struct fc_frame_header *temp_hdr; 13515 struct lpfc_dmabuf *d_buf, *n_buf, *h_buf; 13516 struct hbq_dmabuf *seq_dmabuf = NULL; 13517 13518 /* Use the hdr_buf to find the sequence that matches this frame */ 13519 INIT_LIST_HEAD(&dmabuf->dbuf.list); 13520 INIT_LIST_HEAD(&dmabuf->hbuf.list); 13521 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 13522 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) { 13523 temp_hdr = (struct fc_frame_header *)h_buf->virt; 13524 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) || 13525 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) || 13526 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3))) 13527 continue; 13528 /* found a pending sequence that matches this frame */ 13529 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 13530 break; 13531 } 13532 13533 /* Free up all the frames from the partially assembled sequence */ 13534 if (seq_dmabuf) { 13535 list_for_each_entry_safe(d_buf, n_buf, 13536 &seq_dmabuf->dbuf.list, list) { 13537 list_del_init(&d_buf->list); 13538 lpfc_in_buf_free(vport->phba, d_buf); 13539 } 13540 return true; 13541 } 13542 return false; 13543 } 13544 13545 /** 13546 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler 13547 * @phba: Pointer to HBA context object. 13548 * @cmd_iocbq: pointer to the command iocbq structure. 13549 * @rsp_iocbq: pointer to the response iocbq structure. 13550 * 13551 * This function handles the sequence abort response iocb command complete 13552 * event. It properly releases the memory allocated to the sequence abort 13553 * accept iocb. 13554 **/ 13555 static void 13556 lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba, 13557 struct lpfc_iocbq *cmd_iocbq, 13558 struct lpfc_iocbq *rsp_iocbq) 13559 { 13560 if (cmd_iocbq) 13561 lpfc_sli_release_iocbq(phba, cmd_iocbq); 13562 } 13563 13564 /** 13565 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver. 13566 * @phba: Pointer to HBA context object. 13567 * @xri: xri id in transaction. 13568 * 13569 * This function validates the xri maps to the known range of XRIs allocated an 13570 * used by the driver. 13571 **/ 13572 uint16_t 13573 lpfc_sli4_xri_inrange(struct lpfc_hba *phba, 13574 uint16_t xri) 13575 { 13576 int i; 13577 13578 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) { 13579 if (xri == phba->sli4_hba.xri_ids[i]) 13580 return i; 13581 } 13582 return NO_XRI; 13583 } 13584 13585 13586 /** 13587 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort 13588 * @phba: Pointer to HBA context object. 13589 * @fc_hdr: pointer to a FC frame header. 13590 * 13591 * This function sends a basic response to a previous unsol sequence abort 13592 * event after aborting the sequence handling. 13593 **/ 13594 static void 13595 lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba, 13596 struct fc_frame_header *fc_hdr) 13597 { 13598 struct lpfc_iocbq *ctiocb = NULL; 13599 struct lpfc_nodelist *ndlp; 13600 uint16_t oxid, rxid; 13601 uint32_t sid, fctl; 13602 IOCB_t *icmd; 13603 int rc; 13604 13605 if (!lpfc_is_link_up(phba)) 13606 return; 13607 13608 sid = sli4_sid_from_fc_hdr(fc_hdr); 13609 oxid = be16_to_cpu(fc_hdr->fh_ox_id); 13610 rxid = be16_to_cpu(fc_hdr->fh_rx_id); 13611 13612 ndlp = lpfc_findnode_did(phba->pport, sid); 13613 if (!ndlp) { 13614 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, 13615 "1268 Find ndlp returned NULL for oxid:x%x " 13616 "SID:x%x\n", oxid, sid); 13617 return; 13618 } 13619 if (lpfc_sli4_xri_inrange(phba, rxid)) 13620 lpfc_set_rrq_active(phba, ndlp, rxid, oxid, 0); 13621 13622 /* Allocate buffer for rsp iocb */ 13623 ctiocb = lpfc_sli_get_iocbq(phba); 13624 if (!ctiocb) 13625 return; 13626 13627 /* Extract the F_CTL field from FC_HDR */ 13628 fctl = sli4_fctl_from_fc_hdr(fc_hdr); 13629 13630 icmd = &ctiocb->iocb; 13631 icmd->un.xseq64.bdl.bdeSize = 0; 13632 icmd->un.xseq64.bdl.ulpIoTag32 = 0; 13633 icmd->un.xseq64.w5.hcsw.Dfctl = 0; 13634 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC; 13635 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS; 13636 13637 /* Fill in the rest of iocb fields */ 13638 icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX; 13639 icmd->ulpBdeCount = 0; 13640 icmd->ulpLe = 1; 13641 icmd->ulpClass = CLASS3; 13642 icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; 13643 ctiocb->context1 = ndlp; 13644 13645 ctiocb->iocb_cmpl = NULL; 13646 ctiocb->vport = phba->pport; 13647 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl; 13648 ctiocb->sli4_lxritag = NO_XRI; 13649 ctiocb->sli4_xritag = NO_XRI; 13650 13651 /* If the oxid maps to the FCP XRI range or if it is out of range, 13652 * send a BLS_RJT. The driver no longer has that exchange. 13653 * Override the IOCB for a BA_RJT. 13654 */ 13655 if (oxid > (phba->sli4_hba.max_cfg_param.max_xri + 13656 phba->sli4_hba.max_cfg_param.xri_base) || 13657 oxid > (lpfc_sli4_get_els_iocb_cnt(phba) + 13658 phba->sli4_hba.max_cfg_param.xri_base)) { 13659 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT; 13660 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0); 13661 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID); 13662 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE); 13663 } 13664 13665 if (fctl & FC_FC_EX_CTX) { 13666 /* ABTS sent by responder to CT exchange, construction 13667 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG 13668 * field and RX_ID from ABTS for RX_ID field. 13669 */ 13670 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP); 13671 bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid); 13672 } else { 13673 /* ABTS sent by initiator to CT exchange, construction 13674 * of BA_ACC will need to allocate a new XRI as for the 13675 * XRI_TAG and RX_ID fields. 13676 */ 13677 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT); 13678 bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, NO_XRI); 13679 } 13680 bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid); 13681 13682 /* Xmit CT abts response on exchange <xid> */ 13683 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 13684 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n", 13685 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state); 13686 13687 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0); 13688 if (rc == IOCB_ERROR) { 13689 lpfc_printf_log(phba, KERN_ERR, LOG_ELS, 13690 "2925 Failed to issue CT ABTS RSP x%x on " 13691 "xri x%x, Data x%x\n", 13692 icmd->un.xseq64.w5.hcsw.Rctl, oxid, 13693 phba->link_state); 13694 lpfc_sli_release_iocbq(phba, ctiocb); 13695 } 13696 } 13697 13698 /** 13699 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event 13700 * @vport: Pointer to the vport on which this sequence was received 13701 * @dmabuf: pointer to a dmabuf that describes the FC sequence 13702 * 13703 * This function handles an SLI-4 unsolicited abort event. If the unsolicited 13704 * receive sequence is only partially assembed by the driver, it shall abort 13705 * the partially assembled frames for the sequence. Otherwise, if the 13706 * unsolicited receive sequence has been completely assembled and passed to 13707 * the Upper Layer Protocol (UPL), it then mark the per oxid status for the 13708 * unsolicited sequence has been aborted. After that, it will issue a basic 13709 * accept to accept the abort. 13710 **/ 13711 void 13712 lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport, 13713 struct hbq_dmabuf *dmabuf) 13714 { 13715 struct lpfc_hba *phba = vport->phba; 13716 struct fc_frame_header fc_hdr; 13717 uint32_t fctl; 13718 bool abts_par; 13719 13720 /* Make a copy of fc_hdr before the dmabuf being released */ 13721 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header)); 13722 fctl = sli4_fctl_from_fc_hdr(&fc_hdr); 13723 13724 if (fctl & FC_FC_EX_CTX) { 13725 /* 13726 * ABTS sent by responder to exchange, just free the buffer 13727 */ 13728 lpfc_in_buf_free(phba, &dmabuf->dbuf); 13729 } else { 13730 /* 13731 * ABTS sent by initiator to exchange, need to do cleanup 13732 */ 13733 /* Try to abort partially assembled seq */ 13734 abts_par = lpfc_sli4_abort_partial_seq(vport, dmabuf); 13735 13736 /* Send abort to ULP if partially seq abort failed */ 13737 if (abts_par == false) 13738 lpfc_sli4_send_seq_to_ulp(vport, dmabuf); 13739 else 13740 lpfc_in_buf_free(phba, &dmabuf->dbuf); 13741 } 13742 /* Send basic accept (BA_ACC) to the abort requester */ 13743 lpfc_sli4_seq_abort_rsp(phba, &fc_hdr); 13744 } 13745 13746 /** 13747 * lpfc_seq_complete - Indicates if a sequence is complete 13748 * @dmabuf: pointer to a dmabuf that describes the FC sequence 13749 * 13750 * This function checks the sequence, starting with the frame described by 13751 * @dmabuf, to see if all the frames associated with this sequence are present. 13752 * the frames associated with this sequence are linked to the @dmabuf using the 13753 * dbuf list. This function looks for two major things. 1) That the first frame 13754 * has a sequence count of zero. 2) There is a frame with last frame of sequence 13755 * set. 3) That there are no holes in the sequence count. The function will 13756 * return 1 when the sequence is complete, otherwise it will return 0. 13757 **/ 13758 static int 13759 lpfc_seq_complete(struct hbq_dmabuf *dmabuf) 13760 { 13761 struct fc_frame_header *hdr; 13762 struct lpfc_dmabuf *d_buf; 13763 struct hbq_dmabuf *seq_dmabuf; 13764 uint32_t fctl; 13765 int seq_count = 0; 13766 13767 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 13768 /* make sure first fame of sequence has a sequence count of zero */ 13769 if (hdr->fh_seq_cnt != seq_count) 13770 return 0; 13771 fctl = (hdr->fh_f_ctl[0] << 16 | 13772 hdr->fh_f_ctl[1] << 8 | 13773 hdr->fh_f_ctl[2]); 13774 /* If last frame of sequence we can return success. */ 13775 if (fctl & FC_FC_END_SEQ) 13776 return 1; 13777 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) { 13778 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); 13779 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 13780 /* If there is a hole in the sequence count then fail. */ 13781 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt)) 13782 return 0; 13783 fctl = (hdr->fh_f_ctl[0] << 16 | 13784 hdr->fh_f_ctl[1] << 8 | 13785 hdr->fh_f_ctl[2]); 13786 /* If last frame of sequence we can return success. */ 13787 if (fctl & FC_FC_END_SEQ) 13788 return 1; 13789 } 13790 return 0; 13791 } 13792 13793 /** 13794 * lpfc_prep_seq - Prep sequence for ULP processing 13795 * @vport: Pointer to the vport on which this sequence was received 13796 * @dmabuf: pointer to a dmabuf that describes the FC sequence 13797 * 13798 * This function takes a sequence, described by a list of frames, and creates 13799 * a list of iocbq structures to describe the sequence. This iocbq list will be 13800 * used to issue to the generic unsolicited sequence handler. This routine 13801 * returns a pointer to the first iocbq in the list. If the function is unable 13802 * to allocate an iocbq then it throw out the received frames that were not 13803 * able to be described and return a pointer to the first iocbq. If unable to 13804 * allocate any iocbqs (including the first) this function will return NULL. 13805 **/ 13806 static struct lpfc_iocbq * 13807 lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) 13808 { 13809 struct hbq_dmabuf *hbq_buf; 13810 struct lpfc_dmabuf *d_buf, *n_buf; 13811 struct lpfc_iocbq *first_iocbq, *iocbq; 13812 struct fc_frame_header *fc_hdr; 13813 uint32_t sid; 13814 uint32_t len, tot_len; 13815 struct ulp_bde64 *pbde; 13816 13817 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 13818 /* remove from receive buffer list */ 13819 list_del_init(&seq_dmabuf->hbuf.list); 13820 lpfc_update_rcv_time_stamp(vport); 13821 /* get the Remote Port's SID */ 13822 sid = sli4_sid_from_fc_hdr(fc_hdr); 13823 tot_len = 0; 13824 /* Get an iocbq struct to fill in. */ 13825 first_iocbq = lpfc_sli_get_iocbq(vport->phba); 13826 if (first_iocbq) { 13827 /* Initialize the first IOCB. */ 13828 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0; 13829 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS; 13830 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX; 13831 first_iocbq->iocb.ulpContext = NO_XRI; 13832 first_iocbq->iocb.unsli3.rcvsli3.ox_id = 13833 be16_to_cpu(fc_hdr->fh_ox_id); 13834 /* iocbq is prepped for internal consumption. Physical vpi. */ 13835 first_iocbq->iocb.unsli3.rcvsli3.vpi = 13836 vport->phba->vpi_ids[vport->vpi]; 13837 /* put the first buffer into the first IOCBq */ 13838 first_iocbq->context2 = &seq_dmabuf->dbuf; 13839 first_iocbq->context3 = NULL; 13840 first_iocbq->iocb.ulpBdeCount = 1; 13841 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = 13842 LPFC_DATA_BUF_SIZE; 13843 first_iocbq->iocb.un.rcvels.remoteID = sid; 13844 tot_len = bf_get(lpfc_rcqe_length, 13845 &seq_dmabuf->cq_event.cqe.rcqe_cmpl); 13846 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len; 13847 } 13848 iocbq = first_iocbq; 13849 /* 13850 * Each IOCBq can have two Buffers assigned, so go through the list 13851 * of buffers for this sequence and save two buffers in each IOCBq 13852 */ 13853 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) { 13854 if (!iocbq) { 13855 lpfc_in_buf_free(vport->phba, d_buf); 13856 continue; 13857 } 13858 if (!iocbq->context3) { 13859 iocbq->context3 = d_buf; 13860 iocbq->iocb.ulpBdeCount++; 13861 pbde = (struct ulp_bde64 *) 13862 &iocbq->iocb.unsli3.sli3Words[4]; 13863 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE; 13864 13865 /* We need to get the size out of the right CQE */ 13866 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 13867 len = bf_get(lpfc_rcqe_length, 13868 &hbq_buf->cq_event.cqe.rcqe_cmpl); 13869 iocbq->iocb.unsli3.rcvsli3.acc_len += len; 13870 tot_len += len; 13871 } else { 13872 iocbq = lpfc_sli_get_iocbq(vport->phba); 13873 if (!iocbq) { 13874 if (first_iocbq) { 13875 first_iocbq->iocb.ulpStatus = 13876 IOSTAT_FCP_RSP_ERROR; 13877 first_iocbq->iocb.un.ulpWord[4] = 13878 IOERR_NO_RESOURCES; 13879 } 13880 lpfc_in_buf_free(vport->phba, d_buf); 13881 continue; 13882 } 13883 iocbq->context2 = d_buf; 13884 iocbq->context3 = NULL; 13885 iocbq->iocb.ulpBdeCount = 1; 13886 iocbq->iocb.un.cont64[0].tus.f.bdeSize = 13887 LPFC_DATA_BUF_SIZE; 13888 13889 /* We need to get the size out of the right CQE */ 13890 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 13891 len = bf_get(lpfc_rcqe_length, 13892 &hbq_buf->cq_event.cqe.rcqe_cmpl); 13893 tot_len += len; 13894 iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len; 13895 13896 iocbq->iocb.un.rcvels.remoteID = sid; 13897 list_add_tail(&iocbq->list, &first_iocbq->list); 13898 } 13899 } 13900 return first_iocbq; 13901 } 13902 13903 static void 13904 lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport, 13905 struct hbq_dmabuf *seq_dmabuf) 13906 { 13907 struct fc_frame_header *fc_hdr; 13908 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb; 13909 struct lpfc_hba *phba = vport->phba; 13910 13911 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 13912 iocbq = lpfc_prep_seq(vport, seq_dmabuf); 13913 if (!iocbq) { 13914 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13915 "2707 Ring %d handler: Failed to allocate " 13916 "iocb Rctl x%x Type x%x received\n", 13917 LPFC_ELS_RING, 13918 fc_hdr->fh_r_ctl, fc_hdr->fh_type); 13919 return; 13920 } 13921 if (!lpfc_complete_unsol_iocb(phba, 13922 &phba->sli.ring[LPFC_ELS_RING], 13923 iocbq, fc_hdr->fh_r_ctl, 13924 fc_hdr->fh_type)) 13925 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13926 "2540 Ring %d handler: unexpected Rctl " 13927 "x%x Type x%x received\n", 13928 LPFC_ELS_RING, 13929 fc_hdr->fh_r_ctl, fc_hdr->fh_type); 13930 13931 /* Free iocb created in lpfc_prep_seq */ 13932 list_for_each_entry_safe(curr_iocb, next_iocb, 13933 &iocbq->list, list) { 13934 list_del_init(&curr_iocb->list); 13935 lpfc_sli_release_iocbq(phba, curr_iocb); 13936 } 13937 lpfc_sli_release_iocbq(phba, iocbq); 13938 } 13939 13940 /** 13941 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware 13942 * @phba: Pointer to HBA context object. 13943 * 13944 * This function is called with no lock held. This function processes all 13945 * the received buffers and gives it to upper layers when a received buffer 13946 * indicates that it is the final frame in the sequence. The interrupt 13947 * service routine processes received buffers at interrupt contexts and adds 13948 * received dma buffers to the rb_pend_list queue and signals the worker thread. 13949 * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the 13950 * appropriate receive function when the final frame in a sequence is received. 13951 **/ 13952 void 13953 lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba, 13954 struct hbq_dmabuf *dmabuf) 13955 { 13956 struct hbq_dmabuf *seq_dmabuf; 13957 struct fc_frame_header *fc_hdr; 13958 struct lpfc_vport *vport; 13959 uint32_t fcfi; 13960 13961 /* Process each received buffer */ 13962 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 13963 /* check to see if this a valid type of frame */ 13964 if (lpfc_fc_frame_check(phba, fc_hdr)) { 13965 lpfc_in_buf_free(phba, &dmabuf->dbuf); 13966 return; 13967 } 13968 if ((bf_get(lpfc_cqe_code, 13969 &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1)) 13970 fcfi = bf_get(lpfc_rcqe_fcf_id_v1, 13971 &dmabuf->cq_event.cqe.rcqe_cmpl); 13972 else 13973 fcfi = bf_get(lpfc_rcqe_fcf_id, 13974 &dmabuf->cq_event.cqe.rcqe_cmpl); 13975 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi); 13976 if (!vport || !(vport->vpi_state & LPFC_VPI_REGISTERED)) { 13977 /* throw out the frame */ 13978 lpfc_in_buf_free(phba, &dmabuf->dbuf); 13979 return; 13980 } 13981 /* Handle the basic abort sequence (BA_ABTS) event */ 13982 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) { 13983 lpfc_sli4_handle_unsol_abort(vport, dmabuf); 13984 return; 13985 } 13986 13987 /* Link this frame */ 13988 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf); 13989 if (!seq_dmabuf) { 13990 /* unable to add frame to vport - throw it out */ 13991 lpfc_in_buf_free(phba, &dmabuf->dbuf); 13992 return; 13993 } 13994 /* If not last frame in sequence continue processing frames. */ 13995 if (!lpfc_seq_complete(seq_dmabuf)) 13996 return; 13997 13998 /* Send the complete sequence to the upper layer protocol */ 13999 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf); 14000 } 14001 14002 /** 14003 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port 14004 * @phba: pointer to lpfc hba data structure. 14005 * 14006 * This routine is invoked to post rpi header templates to the 14007 * HBA consistent with the SLI-4 interface spec. This routine 14008 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to 14009 * SLI4_PAGE_SIZE modulo 64 rpi context headers. 14010 * 14011 * This routine does not require any locks. It's usage is expected 14012 * to be driver load or reset recovery when the driver is 14013 * sequential. 14014 * 14015 * Return codes 14016 * 0 - successful 14017 * -EIO - The mailbox failed to complete successfully. 14018 * When this error occurs, the driver is not guaranteed 14019 * to have any rpi regions posted to the device and 14020 * must either attempt to repost the regions or take a 14021 * fatal error. 14022 **/ 14023 int 14024 lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba) 14025 { 14026 struct lpfc_rpi_hdr *rpi_page; 14027 uint32_t rc = 0; 14028 uint16_t lrpi = 0; 14029 14030 /* SLI4 ports that support extents do not require RPI headers. */ 14031 if (!phba->sli4_hba.rpi_hdrs_in_use) 14032 goto exit; 14033 if (phba->sli4_hba.extents_in_use) 14034 return -EIO; 14035 14036 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 14037 /* 14038 * Assign the rpi headers a physical rpi only if the driver 14039 * has not initialized those resources. A port reset only 14040 * needs the headers posted. 14041 */ 14042 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) != 14043 LPFC_RPI_RSRC_RDY) 14044 rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi]; 14045 14046 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page); 14047 if (rc != MBX_SUCCESS) { 14048 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14049 "2008 Error %d posting all rpi " 14050 "headers\n", rc); 14051 rc = -EIO; 14052 break; 14053 } 14054 } 14055 14056 exit: 14057 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 14058 LPFC_RPI_RSRC_RDY); 14059 return rc; 14060 } 14061 14062 /** 14063 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port 14064 * @phba: pointer to lpfc hba data structure. 14065 * @rpi_page: pointer to the rpi memory region. 14066 * 14067 * This routine is invoked to post a single rpi header to the 14068 * HBA consistent with the SLI-4 interface spec. This memory region 14069 * maps up to 64 rpi context regions. 14070 * 14071 * Return codes 14072 * 0 - successful 14073 * -ENOMEM - No available memory 14074 * -EIO - The mailbox failed to complete successfully. 14075 **/ 14076 int 14077 lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page) 14078 { 14079 LPFC_MBOXQ_t *mboxq; 14080 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl; 14081 uint32_t rc = 0; 14082 uint32_t shdr_status, shdr_add_status; 14083 union lpfc_sli4_cfg_shdr *shdr; 14084 14085 /* SLI4 ports that support extents do not require RPI headers. */ 14086 if (!phba->sli4_hba.rpi_hdrs_in_use) 14087 return rc; 14088 if (phba->sli4_hba.extents_in_use) 14089 return -EIO; 14090 14091 /* The port is notified of the header region via a mailbox command. */ 14092 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14093 if (!mboxq) { 14094 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14095 "2001 Unable to allocate memory for issuing " 14096 "SLI_CONFIG_SPECIAL mailbox command\n"); 14097 return -ENOMEM; 14098 } 14099 14100 /* Post all rpi memory regions to the port. */ 14101 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl; 14102 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, 14103 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE, 14104 sizeof(struct lpfc_mbx_post_hdr_tmpl) - 14105 sizeof(struct lpfc_sli4_cfg_mhdr), 14106 LPFC_SLI4_MBX_EMBED); 14107 14108 14109 /* Post the physical rpi to the port for this rpi header. */ 14110 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl, 14111 rpi_page->start_rpi); 14112 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt, 14113 hdr_tmpl, rpi_page->page_count); 14114 14115 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys); 14116 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys); 14117 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 14118 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr; 14119 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 14120 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 14121 if (rc != MBX_TIMEOUT) 14122 mempool_free(mboxq, phba->mbox_mem_pool); 14123 if (shdr_status || shdr_add_status || rc) { 14124 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14125 "2514 POST_RPI_HDR mailbox failed with " 14126 "status x%x add_status x%x, mbx status x%x\n", 14127 shdr_status, shdr_add_status, rc); 14128 rc = -ENXIO; 14129 } 14130 return rc; 14131 } 14132 14133 /** 14134 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range 14135 * @phba: pointer to lpfc hba data structure. 14136 * 14137 * This routine is invoked to post rpi header templates to the 14138 * HBA consistent with the SLI-4 interface spec. This routine 14139 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to 14140 * SLI4_PAGE_SIZE modulo 64 rpi context headers. 14141 * 14142 * Returns 14143 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful 14144 * LPFC_RPI_ALLOC_ERROR if no rpis are available. 14145 **/ 14146 int 14147 lpfc_sli4_alloc_rpi(struct lpfc_hba *phba) 14148 { 14149 unsigned long rpi; 14150 uint16_t max_rpi, rpi_limit; 14151 uint16_t rpi_remaining, lrpi = 0; 14152 struct lpfc_rpi_hdr *rpi_hdr; 14153 14154 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi; 14155 rpi_limit = phba->sli4_hba.next_rpi; 14156 14157 /* 14158 * Fetch the next logical rpi. Because this index is logical, 14159 * the driver starts at 0 each time. 14160 */ 14161 spin_lock_irq(&phba->hbalock); 14162 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0); 14163 if (rpi >= rpi_limit) 14164 rpi = LPFC_RPI_ALLOC_ERROR; 14165 else { 14166 set_bit(rpi, phba->sli4_hba.rpi_bmask); 14167 phba->sli4_hba.max_cfg_param.rpi_used++; 14168 phba->sli4_hba.rpi_count++; 14169 } 14170 14171 /* 14172 * Don't try to allocate more rpi header regions if the device limit 14173 * has been exhausted. 14174 */ 14175 if ((rpi == LPFC_RPI_ALLOC_ERROR) && 14176 (phba->sli4_hba.rpi_count >= max_rpi)) { 14177 spin_unlock_irq(&phba->hbalock); 14178 return rpi; 14179 } 14180 14181 /* 14182 * RPI header postings are not required for SLI4 ports capable of 14183 * extents. 14184 */ 14185 if (!phba->sli4_hba.rpi_hdrs_in_use) { 14186 spin_unlock_irq(&phba->hbalock); 14187 return rpi; 14188 } 14189 14190 /* 14191 * If the driver is running low on rpi resources, allocate another 14192 * page now. Note that the next_rpi value is used because 14193 * it represents how many are actually in use whereas max_rpi notes 14194 * how many are supported max by the device. 14195 */ 14196 rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count; 14197 spin_unlock_irq(&phba->hbalock); 14198 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) { 14199 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); 14200 if (!rpi_hdr) { 14201 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14202 "2002 Error Could not grow rpi " 14203 "count\n"); 14204 } else { 14205 lrpi = rpi_hdr->start_rpi; 14206 rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi]; 14207 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr); 14208 } 14209 } 14210 14211 return rpi; 14212 } 14213 14214 /** 14215 * lpfc_sli4_free_rpi - Release an rpi for reuse. 14216 * @phba: pointer to lpfc hba data structure. 14217 * 14218 * This routine is invoked to release an rpi to the pool of 14219 * available rpis maintained by the driver. 14220 **/ 14221 void 14222 __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi) 14223 { 14224 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) { 14225 phba->sli4_hba.rpi_count--; 14226 phba->sli4_hba.max_cfg_param.rpi_used--; 14227 } 14228 } 14229 14230 /** 14231 * lpfc_sli4_free_rpi - Release an rpi for reuse. 14232 * @phba: pointer to lpfc hba data structure. 14233 * 14234 * This routine is invoked to release an rpi to the pool of 14235 * available rpis maintained by the driver. 14236 **/ 14237 void 14238 lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi) 14239 { 14240 spin_lock_irq(&phba->hbalock); 14241 __lpfc_sli4_free_rpi(phba, rpi); 14242 spin_unlock_irq(&phba->hbalock); 14243 } 14244 14245 /** 14246 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region 14247 * @phba: pointer to lpfc hba data structure. 14248 * 14249 * This routine is invoked to remove the memory region that 14250 * provided rpi via a bitmask. 14251 **/ 14252 void 14253 lpfc_sli4_remove_rpis(struct lpfc_hba *phba) 14254 { 14255 kfree(phba->sli4_hba.rpi_bmask); 14256 kfree(phba->sli4_hba.rpi_ids); 14257 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 14258 } 14259 14260 /** 14261 * lpfc_sli4_resume_rpi - Remove the rpi bitmask region 14262 * @phba: pointer to lpfc hba data structure. 14263 * 14264 * This routine is invoked to remove the memory region that 14265 * provided rpi via a bitmask. 14266 **/ 14267 int 14268 lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp) 14269 { 14270 LPFC_MBOXQ_t *mboxq; 14271 struct lpfc_hba *phba = ndlp->phba; 14272 int rc; 14273 14274 /* The port is notified of the header region via a mailbox command. */ 14275 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14276 if (!mboxq) 14277 return -ENOMEM; 14278 14279 /* Post all rpi memory regions to the port. */ 14280 lpfc_resume_rpi(mboxq, ndlp); 14281 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 14282 if (rc == MBX_NOT_FINISHED) { 14283 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14284 "2010 Resume RPI Mailbox failed " 14285 "status %d, mbxStatus x%x\n", rc, 14286 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 14287 mempool_free(mboxq, phba->mbox_mem_pool); 14288 return -EIO; 14289 } 14290 return 0; 14291 } 14292 14293 /** 14294 * lpfc_sli4_init_vpi - Initialize a vpi with the port 14295 * @vport: Pointer to the vport for which the vpi is being initialized 14296 * 14297 * This routine is invoked to activate a vpi with the port. 14298 * 14299 * Returns: 14300 * 0 success 14301 * -Evalue otherwise 14302 **/ 14303 int 14304 lpfc_sli4_init_vpi(struct lpfc_vport *vport) 14305 { 14306 LPFC_MBOXQ_t *mboxq; 14307 int rc = 0; 14308 int retval = MBX_SUCCESS; 14309 uint32_t mbox_tmo; 14310 struct lpfc_hba *phba = vport->phba; 14311 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14312 if (!mboxq) 14313 return -ENOMEM; 14314 lpfc_init_vpi(phba, mboxq, vport->vpi); 14315 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_INIT_VPI); 14316 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 14317 if (rc != MBX_SUCCESS) { 14318 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI, 14319 "2022 INIT VPI Mailbox failed " 14320 "status %d, mbxStatus x%x\n", rc, 14321 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 14322 retval = -EIO; 14323 } 14324 if (rc != MBX_TIMEOUT) 14325 mempool_free(mboxq, vport->phba->mbox_mem_pool); 14326 14327 return retval; 14328 } 14329 14330 /** 14331 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler. 14332 * @phba: pointer to lpfc hba data structure. 14333 * @mboxq: Pointer to mailbox object. 14334 * 14335 * This routine is invoked to manually add a single FCF record. The caller 14336 * must pass a completely initialized FCF_Record. This routine takes 14337 * care of the nonembedded mailbox operations. 14338 **/ 14339 static void 14340 lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 14341 { 14342 void *virt_addr; 14343 union lpfc_sli4_cfg_shdr *shdr; 14344 uint32_t shdr_status, shdr_add_status; 14345 14346 virt_addr = mboxq->sge_array->addr[0]; 14347 /* The IOCTL status is embedded in the mailbox subheader. */ 14348 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr; 14349 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 14350 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 14351 14352 if ((shdr_status || shdr_add_status) && 14353 (shdr_status != STATUS_FCF_IN_USE)) 14354 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14355 "2558 ADD_FCF_RECORD mailbox failed with " 14356 "status x%x add_status x%x\n", 14357 shdr_status, shdr_add_status); 14358 14359 lpfc_sli4_mbox_cmd_free(phba, mboxq); 14360 } 14361 14362 /** 14363 * lpfc_sli4_add_fcf_record - Manually add an FCF Record. 14364 * @phba: pointer to lpfc hba data structure. 14365 * @fcf_record: pointer to the initialized fcf record to add. 14366 * 14367 * This routine is invoked to manually add a single FCF record. The caller 14368 * must pass a completely initialized FCF_Record. This routine takes 14369 * care of the nonembedded mailbox operations. 14370 **/ 14371 int 14372 lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record) 14373 { 14374 int rc = 0; 14375 LPFC_MBOXQ_t *mboxq; 14376 uint8_t *bytep; 14377 void *virt_addr; 14378 dma_addr_t phys_addr; 14379 struct lpfc_mbx_sge sge; 14380 uint32_t alloc_len, req_len; 14381 uint32_t fcfindex; 14382 14383 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14384 if (!mboxq) { 14385 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14386 "2009 Failed to allocate mbox for ADD_FCF cmd\n"); 14387 return -ENOMEM; 14388 } 14389 14390 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) + 14391 sizeof(uint32_t); 14392 14393 /* Allocate DMA memory and set up the non-embedded mailbox command */ 14394 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, 14395 LPFC_MBOX_OPCODE_FCOE_ADD_FCF, 14396 req_len, LPFC_SLI4_MBX_NEMBED); 14397 if (alloc_len < req_len) { 14398 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14399 "2523 Allocated DMA memory size (x%x) is " 14400 "less than the requested DMA memory " 14401 "size (x%x)\n", alloc_len, req_len); 14402 lpfc_sli4_mbox_cmd_free(phba, mboxq); 14403 return -ENOMEM; 14404 } 14405 14406 /* 14407 * Get the first SGE entry from the non-embedded DMA memory. This 14408 * routine only uses a single SGE. 14409 */ 14410 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge); 14411 phys_addr = getPaddr(sge.pa_hi, sge.pa_lo); 14412 virt_addr = mboxq->sge_array->addr[0]; 14413 /* 14414 * Configure the FCF record for FCFI 0. This is the driver's 14415 * hardcoded default and gets used in nonFIP mode. 14416 */ 14417 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record); 14418 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr); 14419 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t)); 14420 14421 /* 14422 * Copy the fcf_index and the FCF Record Data. The data starts after 14423 * the FCoE header plus word10. The data copy needs to be endian 14424 * correct. 14425 */ 14426 bytep += sizeof(uint32_t); 14427 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record)); 14428 mboxq->vport = phba->pport; 14429 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record; 14430 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 14431 if (rc == MBX_NOT_FINISHED) { 14432 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14433 "2515 ADD_FCF_RECORD mailbox failed with " 14434 "status 0x%x\n", rc); 14435 lpfc_sli4_mbox_cmd_free(phba, mboxq); 14436 rc = -EIO; 14437 } else 14438 rc = 0; 14439 14440 return rc; 14441 } 14442 14443 /** 14444 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record. 14445 * @phba: pointer to lpfc hba data structure. 14446 * @fcf_record: pointer to the fcf record to write the default data. 14447 * @fcf_index: FCF table entry index. 14448 * 14449 * This routine is invoked to build the driver's default FCF record. The 14450 * values used are hardcoded. This routine handles memory initialization. 14451 * 14452 **/ 14453 void 14454 lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba, 14455 struct fcf_record *fcf_record, 14456 uint16_t fcf_index) 14457 { 14458 memset(fcf_record, 0, sizeof(struct fcf_record)); 14459 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE; 14460 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER; 14461 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY; 14462 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]); 14463 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]); 14464 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]); 14465 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3); 14466 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4); 14467 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5); 14468 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]); 14469 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]); 14470 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]); 14471 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1); 14472 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1); 14473 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index); 14474 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record, 14475 LPFC_FCF_FPMA | LPFC_FCF_SPMA); 14476 /* Set the VLAN bit map */ 14477 if (phba->valid_vlan) { 14478 fcf_record->vlan_bitmap[phba->vlan_id / 8] 14479 = 1 << (phba->vlan_id % 8); 14480 } 14481 } 14482 14483 /** 14484 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan. 14485 * @phba: pointer to lpfc hba data structure. 14486 * @fcf_index: FCF table entry offset. 14487 * 14488 * This routine is invoked to scan the entire FCF table by reading FCF 14489 * record and processing it one at a time starting from the @fcf_index 14490 * for initial FCF discovery or fast FCF failover rediscovery. 14491 * 14492 * Return 0 if the mailbox command is submitted successfully, none 0 14493 * otherwise. 14494 **/ 14495 int 14496 lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) 14497 { 14498 int rc = 0, error; 14499 LPFC_MBOXQ_t *mboxq; 14500 14501 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag; 14502 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14503 if (!mboxq) { 14504 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14505 "2000 Failed to allocate mbox for " 14506 "READ_FCF cmd\n"); 14507 error = -ENOMEM; 14508 goto fail_fcf_scan; 14509 } 14510 /* Construct the read FCF record mailbox command */ 14511 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); 14512 if (rc) { 14513 error = -EINVAL; 14514 goto fail_fcf_scan; 14515 } 14516 /* Issue the mailbox command asynchronously */ 14517 mboxq->vport = phba->pport; 14518 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec; 14519 14520 spin_lock_irq(&phba->hbalock); 14521 phba->hba_flag |= FCF_TS_INPROG; 14522 spin_unlock_irq(&phba->hbalock); 14523 14524 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 14525 if (rc == MBX_NOT_FINISHED) 14526 error = -EIO; 14527 else { 14528 /* Reset eligible FCF count for new scan */ 14529 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST) 14530 phba->fcf.eligible_fcf_cnt = 0; 14531 error = 0; 14532 } 14533 fail_fcf_scan: 14534 if (error) { 14535 if (mboxq) 14536 lpfc_sli4_mbox_cmd_free(phba, mboxq); 14537 /* FCF scan failed, clear FCF_TS_INPROG flag */ 14538 spin_lock_irq(&phba->hbalock); 14539 phba->hba_flag &= ~FCF_TS_INPROG; 14540 spin_unlock_irq(&phba->hbalock); 14541 } 14542 return error; 14543 } 14544 14545 /** 14546 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf. 14547 * @phba: pointer to lpfc hba data structure. 14548 * @fcf_index: FCF table entry offset. 14549 * 14550 * This routine is invoked to read an FCF record indicated by @fcf_index 14551 * and to use it for FLOGI roundrobin FCF failover. 14552 * 14553 * Return 0 if the mailbox command is submitted successfully, none 0 14554 * otherwise. 14555 **/ 14556 int 14557 lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) 14558 { 14559 int rc = 0, error; 14560 LPFC_MBOXQ_t *mboxq; 14561 14562 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14563 if (!mboxq) { 14564 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT, 14565 "2763 Failed to allocate mbox for " 14566 "READ_FCF cmd\n"); 14567 error = -ENOMEM; 14568 goto fail_fcf_read; 14569 } 14570 /* Construct the read FCF record mailbox command */ 14571 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); 14572 if (rc) { 14573 error = -EINVAL; 14574 goto fail_fcf_read; 14575 } 14576 /* Issue the mailbox command asynchronously */ 14577 mboxq->vport = phba->pport; 14578 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec; 14579 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 14580 if (rc == MBX_NOT_FINISHED) 14581 error = -EIO; 14582 else 14583 error = 0; 14584 14585 fail_fcf_read: 14586 if (error && mboxq) 14587 lpfc_sli4_mbox_cmd_free(phba, mboxq); 14588 return error; 14589 } 14590 14591 /** 14592 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask. 14593 * @phba: pointer to lpfc hba data structure. 14594 * @fcf_index: FCF table entry offset. 14595 * 14596 * This routine is invoked to read an FCF record indicated by @fcf_index to 14597 * determine whether it's eligible for FLOGI roundrobin failover list. 14598 * 14599 * Return 0 if the mailbox command is submitted successfully, none 0 14600 * otherwise. 14601 **/ 14602 int 14603 lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) 14604 { 14605 int rc = 0, error; 14606 LPFC_MBOXQ_t *mboxq; 14607 14608 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14609 if (!mboxq) { 14610 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT, 14611 "2758 Failed to allocate mbox for " 14612 "READ_FCF cmd\n"); 14613 error = -ENOMEM; 14614 goto fail_fcf_read; 14615 } 14616 /* Construct the read FCF record mailbox command */ 14617 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); 14618 if (rc) { 14619 error = -EINVAL; 14620 goto fail_fcf_read; 14621 } 14622 /* Issue the mailbox command asynchronously */ 14623 mboxq->vport = phba->pport; 14624 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec; 14625 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 14626 if (rc == MBX_NOT_FINISHED) 14627 error = -EIO; 14628 else 14629 error = 0; 14630 14631 fail_fcf_read: 14632 if (error && mboxq) 14633 lpfc_sli4_mbox_cmd_free(phba, mboxq); 14634 return error; 14635 } 14636 14637 /** 14638 * lpfc_check_next_fcf_pri 14639 * phba pointer to the lpfc_hba struct for this port. 14640 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get 14641 * routine when the rr_bmask is empty. The FCF indecies are put into the 14642 * rr_bmask based on their priority level. Starting from the highest priority 14643 * to the lowest. The most likely FCF candidate will be in the highest 14644 * priority group. When this routine is called it searches the fcf_pri list for 14645 * next lowest priority group and repopulates the rr_bmask with only those 14646 * fcf_indexes. 14647 * returns: 14648 * 1=success 0=failure 14649 **/ 14650 int 14651 lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba) 14652 { 14653 uint16_t next_fcf_pri; 14654 uint16_t last_index; 14655 struct lpfc_fcf_pri *fcf_pri; 14656 int rc; 14657 int ret = 0; 14658 14659 last_index = find_first_bit(phba->fcf.fcf_rr_bmask, 14660 LPFC_SLI4_FCF_TBL_INDX_MAX); 14661 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 14662 "3060 Last IDX %d\n", last_index); 14663 if (list_empty(&phba->fcf.fcf_pri_list)) { 14664 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 14665 "3061 Last IDX %d\n", last_index); 14666 return 0; /* Empty rr list */ 14667 } 14668 next_fcf_pri = 0; 14669 /* 14670 * Clear the rr_bmask and set all of the bits that are at this 14671 * priority. 14672 */ 14673 memset(phba->fcf.fcf_rr_bmask, 0, 14674 sizeof(*phba->fcf.fcf_rr_bmask)); 14675 spin_lock_irq(&phba->hbalock); 14676 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) { 14677 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED) 14678 continue; 14679 /* 14680 * the 1st priority that has not FLOGI failed 14681 * will be the highest. 14682 */ 14683 if (!next_fcf_pri) 14684 next_fcf_pri = fcf_pri->fcf_rec.priority; 14685 spin_unlock_irq(&phba->hbalock); 14686 if (fcf_pri->fcf_rec.priority == next_fcf_pri) { 14687 rc = lpfc_sli4_fcf_rr_index_set(phba, 14688 fcf_pri->fcf_rec.fcf_index); 14689 if (rc) 14690 return 0; 14691 } 14692 spin_lock_irq(&phba->hbalock); 14693 } 14694 /* 14695 * if next_fcf_pri was not set above and the list is not empty then 14696 * we have failed flogis on all of them. So reset flogi failed 14697 * and start at the begining. 14698 */ 14699 if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) { 14700 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) { 14701 fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED; 14702 /* 14703 * the 1st priority that has not FLOGI failed 14704 * will be the highest. 14705 */ 14706 if (!next_fcf_pri) 14707 next_fcf_pri = fcf_pri->fcf_rec.priority; 14708 spin_unlock_irq(&phba->hbalock); 14709 if (fcf_pri->fcf_rec.priority == next_fcf_pri) { 14710 rc = lpfc_sli4_fcf_rr_index_set(phba, 14711 fcf_pri->fcf_rec.fcf_index); 14712 if (rc) 14713 return 0; 14714 } 14715 spin_lock_irq(&phba->hbalock); 14716 } 14717 } else 14718 ret = 1; 14719 spin_unlock_irq(&phba->hbalock); 14720 14721 return ret; 14722 } 14723 /** 14724 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index 14725 * @phba: pointer to lpfc hba data structure. 14726 * 14727 * This routine is to get the next eligible FCF record index in a round 14728 * robin fashion. If the next eligible FCF record index equals to the 14729 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF) 14730 * shall be returned, otherwise, the next eligible FCF record's index 14731 * shall be returned. 14732 **/ 14733 uint16_t 14734 lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba) 14735 { 14736 uint16_t next_fcf_index; 14737 14738 /* Search start from next bit of currently registered FCF index */ 14739 next_priority: 14740 next_fcf_index = (phba->fcf.current_rec.fcf_indx + 1) % 14741 LPFC_SLI4_FCF_TBL_INDX_MAX; 14742 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, 14743 LPFC_SLI4_FCF_TBL_INDX_MAX, 14744 next_fcf_index); 14745 14746 /* Wrap around condition on phba->fcf.fcf_rr_bmask */ 14747 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 14748 /* 14749 * If we have wrapped then we need to clear the bits that 14750 * have been tested so that we can detect when we should 14751 * change the priority level. 14752 */ 14753 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, 14754 LPFC_SLI4_FCF_TBL_INDX_MAX, 0); 14755 } 14756 14757 14758 /* Check roundrobin failover list empty condition */ 14759 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX || 14760 next_fcf_index == phba->fcf.current_rec.fcf_indx) { 14761 /* 14762 * If next fcf index is not found check if there are lower 14763 * Priority level fcf's in the fcf_priority list. 14764 * Set up the rr_bmask with all of the avaiable fcf bits 14765 * at that level and continue the selection process. 14766 */ 14767 if (lpfc_check_next_fcf_pri_level(phba)) 14768 goto next_priority; 14769 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 14770 "2844 No roundrobin failover FCF available\n"); 14771 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) 14772 return LPFC_FCOE_FCF_NEXT_NONE; 14773 else { 14774 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 14775 "3063 Only FCF available idx %d, flag %x\n", 14776 next_fcf_index, 14777 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag); 14778 return next_fcf_index; 14779 } 14780 } 14781 14782 if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX && 14783 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag & 14784 LPFC_FCF_FLOGI_FAILED) 14785 goto next_priority; 14786 14787 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 14788 "2845 Get next roundrobin failover FCF (x%x)\n", 14789 next_fcf_index); 14790 14791 return next_fcf_index; 14792 } 14793 14794 /** 14795 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index 14796 * @phba: pointer to lpfc hba data structure. 14797 * 14798 * This routine sets the FCF record index in to the eligible bmask for 14799 * roundrobin failover search. It checks to make sure that the index 14800 * does not go beyond the range of the driver allocated bmask dimension 14801 * before setting the bit. 14802 * 14803 * Returns 0 if the index bit successfully set, otherwise, it returns 14804 * -EINVAL. 14805 **/ 14806 int 14807 lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index) 14808 { 14809 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 14810 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 14811 "2610 FCF (x%x) reached driver's book " 14812 "keeping dimension:x%x\n", 14813 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); 14814 return -EINVAL; 14815 } 14816 /* Set the eligible FCF record index bmask */ 14817 set_bit(fcf_index, phba->fcf.fcf_rr_bmask); 14818 14819 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 14820 "2790 Set FCF (x%x) to roundrobin FCF failover " 14821 "bmask\n", fcf_index); 14822 14823 return 0; 14824 } 14825 14826 /** 14827 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index 14828 * @phba: pointer to lpfc hba data structure. 14829 * 14830 * This routine clears the FCF record index from the eligible bmask for 14831 * roundrobin failover search. It checks to make sure that the index 14832 * does not go beyond the range of the driver allocated bmask dimension 14833 * before clearing the bit. 14834 **/ 14835 void 14836 lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index) 14837 { 14838 struct lpfc_fcf_pri *fcf_pri; 14839 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 14840 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 14841 "2762 FCF (x%x) reached driver's book " 14842 "keeping dimension:x%x\n", 14843 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); 14844 return; 14845 } 14846 /* Clear the eligible FCF record index bmask */ 14847 spin_lock_irq(&phba->hbalock); 14848 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) { 14849 if (fcf_pri->fcf_rec.fcf_index == fcf_index) { 14850 list_del_init(&fcf_pri->list); 14851 break; 14852 } 14853 } 14854 spin_unlock_irq(&phba->hbalock); 14855 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask); 14856 14857 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 14858 "2791 Clear FCF (x%x) from roundrobin failover " 14859 "bmask\n", fcf_index); 14860 } 14861 14862 /** 14863 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table 14864 * @phba: pointer to lpfc hba data structure. 14865 * 14866 * This routine is the completion routine for the rediscover FCF table mailbox 14867 * command. If the mailbox command returned failure, it will try to stop the 14868 * FCF rediscover wait timer. 14869 **/ 14870 void 14871 lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) 14872 { 14873 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf; 14874 uint32_t shdr_status, shdr_add_status; 14875 14876 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl; 14877 14878 shdr_status = bf_get(lpfc_mbox_hdr_status, 14879 &redisc_fcf->header.cfg_shdr.response); 14880 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 14881 &redisc_fcf->header.cfg_shdr.response); 14882 if (shdr_status || shdr_add_status) { 14883 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 14884 "2746 Requesting for FCF rediscovery failed " 14885 "status x%x add_status x%x\n", 14886 shdr_status, shdr_add_status); 14887 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) { 14888 spin_lock_irq(&phba->hbalock); 14889 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC; 14890 spin_unlock_irq(&phba->hbalock); 14891 /* 14892 * CVL event triggered FCF rediscover request failed, 14893 * last resort to re-try current registered FCF entry. 14894 */ 14895 lpfc_retry_pport_discovery(phba); 14896 } else { 14897 spin_lock_irq(&phba->hbalock); 14898 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; 14899 spin_unlock_irq(&phba->hbalock); 14900 /* 14901 * DEAD FCF event triggered FCF rediscover request 14902 * failed, last resort to fail over as a link down 14903 * to FCF registration. 14904 */ 14905 lpfc_sli4_fcf_dead_failthrough(phba); 14906 } 14907 } else { 14908 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 14909 "2775 Start FCF rediscover quiescent timer\n"); 14910 /* 14911 * Start FCF rediscovery wait timer for pending FCF 14912 * before rescan FCF record table. 14913 */ 14914 lpfc_fcf_redisc_wait_start_timer(phba); 14915 } 14916 14917 mempool_free(mbox, phba->mbox_mem_pool); 14918 } 14919 14920 /** 14921 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port. 14922 * @phba: pointer to lpfc hba data structure. 14923 * 14924 * This routine is invoked to request for rediscovery of the entire FCF table 14925 * by the port. 14926 **/ 14927 int 14928 lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba) 14929 { 14930 LPFC_MBOXQ_t *mbox; 14931 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf; 14932 int rc, length; 14933 14934 /* Cancel retry delay timers to all vports before FCF rediscover */ 14935 lpfc_cancel_all_vport_retry_delay_timer(phba); 14936 14937 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14938 if (!mbox) { 14939 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14940 "2745 Failed to allocate mbox for " 14941 "requesting FCF rediscover.\n"); 14942 return -ENOMEM; 14943 } 14944 14945 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) - 14946 sizeof(struct lpfc_sli4_cfg_mhdr)); 14947 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 14948 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF, 14949 length, LPFC_SLI4_MBX_EMBED); 14950 14951 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl; 14952 /* Set count to 0 for invalidating the entire FCF database */ 14953 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0); 14954 14955 /* Issue the mailbox command asynchronously */ 14956 mbox->vport = phba->pport; 14957 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table; 14958 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 14959 14960 if (rc == MBX_NOT_FINISHED) { 14961 mempool_free(mbox, phba->mbox_mem_pool); 14962 return -EIO; 14963 } 14964 return 0; 14965 } 14966 14967 /** 14968 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event 14969 * @phba: pointer to lpfc hba data structure. 14970 * 14971 * This function is the failover routine as a last resort to the FCF DEAD 14972 * event when driver failed to perform fast FCF failover. 14973 **/ 14974 void 14975 lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba) 14976 { 14977 uint32_t link_state; 14978 14979 /* 14980 * Last resort as FCF DEAD event failover will treat this as 14981 * a link down, but save the link state because we don't want 14982 * it to be changed to Link Down unless it is already down. 14983 */ 14984 link_state = phba->link_state; 14985 lpfc_linkdown(phba); 14986 phba->link_state = link_state; 14987 14988 /* Unregister FCF if no devices connected to it */ 14989 lpfc_unregister_unused_fcf(phba); 14990 } 14991 14992 /** 14993 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled. 14994 * @phba: pointer to lpfc hba data structure. 14995 * 14996 * This function read region 23 and parse TLV for port status to 14997 * decide if the user disaled the port. If the TLV indicates the 14998 * port is disabled, the hba_flag is set accordingly. 14999 **/ 15000 void 15001 lpfc_sli_read_link_ste(struct lpfc_hba *phba) 15002 { 15003 LPFC_MBOXQ_t *pmb = NULL; 15004 MAILBOX_t *mb; 15005 uint8_t *rgn23_data = NULL; 15006 uint32_t offset = 0, data_size, sub_tlv_len, tlv_offset; 15007 int rc; 15008 15009 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15010 if (!pmb) { 15011 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15012 "2600 lpfc_sli_read_serdes_param failed to" 15013 " allocate mailbox memory\n"); 15014 goto out; 15015 } 15016 mb = &pmb->u.mb; 15017 15018 /* Get adapter Region 23 data */ 15019 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL); 15020 if (!rgn23_data) 15021 goto out; 15022 15023 do { 15024 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23); 15025 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 15026 15027 if (rc != MBX_SUCCESS) { 15028 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 15029 "2601 lpfc_sli_read_link_ste failed to" 15030 " read config region 23 rc 0x%x Status 0x%x\n", 15031 rc, mb->mbxStatus); 15032 mb->un.varDmp.word_cnt = 0; 15033 } 15034 /* 15035 * dump mem may return a zero when finished or we got a 15036 * mailbox error, either way we are done. 15037 */ 15038 if (mb->un.varDmp.word_cnt == 0) 15039 break; 15040 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset) 15041 mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset; 15042 15043 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, 15044 rgn23_data + offset, 15045 mb->un.varDmp.word_cnt); 15046 offset += mb->un.varDmp.word_cnt; 15047 } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE); 15048 15049 data_size = offset; 15050 offset = 0; 15051 15052 if (!data_size) 15053 goto out; 15054 15055 /* Check the region signature first */ 15056 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) { 15057 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15058 "2619 Config region 23 has bad signature\n"); 15059 goto out; 15060 } 15061 offset += 4; 15062 15063 /* Check the data structure version */ 15064 if (rgn23_data[offset] != LPFC_REGION23_VERSION) { 15065 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15066 "2620 Config region 23 has bad version\n"); 15067 goto out; 15068 } 15069 offset += 4; 15070 15071 /* Parse TLV entries in the region */ 15072 while (offset < data_size) { 15073 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) 15074 break; 15075 /* 15076 * If the TLV is not driver specific TLV or driver id is 15077 * not linux driver id, skip the record. 15078 */ 15079 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) || 15080 (rgn23_data[offset + 2] != LINUX_DRIVER_ID) || 15081 (rgn23_data[offset + 3] != 0)) { 15082 offset += rgn23_data[offset + 1] * 4 + 4; 15083 continue; 15084 } 15085 15086 /* Driver found a driver specific TLV in the config region */ 15087 sub_tlv_len = rgn23_data[offset + 1] * 4; 15088 offset += 4; 15089 tlv_offset = 0; 15090 15091 /* 15092 * Search for configured port state sub-TLV. 15093 */ 15094 while ((offset < data_size) && 15095 (tlv_offset < sub_tlv_len)) { 15096 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) { 15097 offset += 4; 15098 tlv_offset += 4; 15099 break; 15100 } 15101 if (rgn23_data[offset] != PORT_STE_TYPE) { 15102 offset += rgn23_data[offset + 1] * 4 + 4; 15103 tlv_offset += rgn23_data[offset + 1] * 4 + 4; 15104 continue; 15105 } 15106 15107 /* This HBA contains PORT_STE configured */ 15108 if (!rgn23_data[offset + 2]) 15109 phba->hba_flag |= LINK_DISABLED; 15110 15111 goto out; 15112 } 15113 } 15114 out: 15115 if (pmb) 15116 mempool_free(pmb, phba->mbox_mem_pool); 15117 kfree(rgn23_data); 15118 return; 15119 } 15120 15121 /** 15122 * lpfc_wr_object - write an object to the firmware 15123 * @phba: HBA structure that indicates port to create a queue on. 15124 * @dmabuf_list: list of dmabufs to write to the port. 15125 * @size: the total byte value of the objects to write to the port. 15126 * @offset: the current offset to be used to start the transfer. 15127 * 15128 * This routine will create a wr_object mailbox command to send to the port. 15129 * the mailbox command will be constructed using the dma buffers described in 15130 * @dmabuf_list to create a list of BDEs. This routine will fill in as many 15131 * BDEs that the imbedded mailbox can support. The @offset variable will be 15132 * used to indicate the starting offset of the transfer and will also return 15133 * the offset after the write object mailbox has completed. @size is used to 15134 * determine the end of the object and whether the eof bit should be set. 15135 * 15136 * Return 0 is successful and offset will contain the the new offset to use 15137 * for the next write. 15138 * Return negative value for error cases. 15139 **/ 15140 int 15141 lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list, 15142 uint32_t size, uint32_t *offset) 15143 { 15144 struct lpfc_mbx_wr_object *wr_object; 15145 LPFC_MBOXQ_t *mbox; 15146 int rc = 0, i = 0; 15147 uint32_t shdr_status, shdr_add_status; 15148 uint32_t mbox_tmo; 15149 union lpfc_sli4_cfg_shdr *shdr; 15150 struct lpfc_dmabuf *dmabuf; 15151 uint32_t written = 0; 15152 15153 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15154 if (!mbox) 15155 return -ENOMEM; 15156 15157 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 15158 LPFC_MBOX_OPCODE_WRITE_OBJECT, 15159 sizeof(struct lpfc_mbx_wr_object) - 15160 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED); 15161 15162 wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object; 15163 wr_object->u.request.write_offset = *offset; 15164 sprintf((uint8_t *)wr_object->u.request.object_name, "/"); 15165 wr_object->u.request.object_name[0] = 15166 cpu_to_le32(wr_object->u.request.object_name[0]); 15167 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0); 15168 list_for_each_entry(dmabuf, dmabuf_list, list) { 15169 if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size) 15170 break; 15171 wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys); 15172 wr_object->u.request.bde[i].addrHigh = 15173 putPaddrHigh(dmabuf->phys); 15174 if (written + SLI4_PAGE_SIZE >= size) { 15175 wr_object->u.request.bde[i].tus.f.bdeSize = 15176 (size - written); 15177 written += (size - written); 15178 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1); 15179 } else { 15180 wr_object->u.request.bde[i].tus.f.bdeSize = 15181 SLI4_PAGE_SIZE; 15182 written += SLI4_PAGE_SIZE; 15183 } 15184 i++; 15185 } 15186 wr_object->u.request.bde_count = i; 15187 bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written); 15188 if (!phba->sli4_hba.intr_enable) 15189 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 15190 else { 15191 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); 15192 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 15193 } 15194 /* The IOCTL status is embedded in the mailbox subheader. */ 15195 shdr = (union lpfc_sli4_cfg_shdr *) &wr_object->header.cfg_shdr; 15196 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15197 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15198 if (rc != MBX_TIMEOUT) 15199 mempool_free(mbox, phba->mbox_mem_pool); 15200 if (shdr_status || shdr_add_status || rc) { 15201 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15202 "3025 Write Object mailbox failed with " 15203 "status x%x add_status x%x, mbx status x%x\n", 15204 shdr_status, shdr_add_status, rc); 15205 rc = -ENXIO; 15206 } else 15207 *offset += wr_object->u.response.actual_write_length; 15208 return rc; 15209 } 15210 15211 /** 15212 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands. 15213 * @vport: pointer to vport data structure. 15214 * 15215 * This function iterate through the mailboxq and clean up all REG_LOGIN 15216 * and REG_VPI mailbox commands associated with the vport. This function 15217 * is called when driver want to restart discovery of the vport due to 15218 * a Clear Virtual Link event. 15219 **/ 15220 void 15221 lpfc_cleanup_pending_mbox(struct lpfc_vport *vport) 15222 { 15223 struct lpfc_hba *phba = vport->phba; 15224 LPFC_MBOXQ_t *mb, *nextmb; 15225 struct lpfc_dmabuf *mp; 15226 struct lpfc_nodelist *ndlp; 15227 struct lpfc_nodelist *act_mbx_ndlp = NULL; 15228 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 15229 LIST_HEAD(mbox_cmd_list); 15230 uint8_t restart_loop; 15231 15232 /* Clean up internally queued mailbox commands with the vport */ 15233 spin_lock_irq(&phba->hbalock); 15234 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { 15235 if (mb->vport != vport) 15236 continue; 15237 15238 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) && 15239 (mb->u.mb.mbxCommand != MBX_REG_VPI)) 15240 continue; 15241 15242 list_del(&mb->list); 15243 list_add_tail(&mb->list, &mbox_cmd_list); 15244 } 15245 /* Clean up active mailbox command with the vport */ 15246 mb = phba->sli.mbox_active; 15247 if (mb && (mb->vport == vport)) { 15248 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) || 15249 (mb->u.mb.mbxCommand == MBX_REG_VPI)) 15250 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 15251 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 15252 act_mbx_ndlp = (struct lpfc_nodelist *)mb->context2; 15253 /* Put reference count for delayed processing */ 15254 act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp); 15255 /* Unregister the RPI when mailbox complete */ 15256 mb->mbox_flag |= LPFC_MBX_IMED_UNREG; 15257 } 15258 } 15259 /* Cleanup any mailbox completions which are not yet processed */ 15260 do { 15261 restart_loop = 0; 15262 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) { 15263 /* 15264 * If this mailox is already processed or it is 15265 * for another vport ignore it. 15266 */ 15267 if ((mb->vport != vport) || 15268 (mb->mbox_flag & LPFC_MBX_IMED_UNREG)) 15269 continue; 15270 15271 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) && 15272 (mb->u.mb.mbxCommand != MBX_REG_VPI)) 15273 continue; 15274 15275 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 15276 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 15277 ndlp = (struct lpfc_nodelist *)mb->context2; 15278 /* Unregister the RPI when mailbox complete */ 15279 mb->mbox_flag |= LPFC_MBX_IMED_UNREG; 15280 restart_loop = 1; 15281 spin_unlock_irq(&phba->hbalock); 15282 spin_lock(shost->host_lock); 15283 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; 15284 spin_unlock(shost->host_lock); 15285 spin_lock_irq(&phba->hbalock); 15286 break; 15287 } 15288 } 15289 } while (restart_loop); 15290 15291 spin_unlock_irq(&phba->hbalock); 15292 15293 /* Release the cleaned-up mailbox commands */ 15294 while (!list_empty(&mbox_cmd_list)) { 15295 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list); 15296 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 15297 mp = (struct lpfc_dmabuf *) (mb->context1); 15298 if (mp) { 15299 __lpfc_mbuf_free(phba, mp->virt, mp->phys); 15300 kfree(mp); 15301 } 15302 ndlp = (struct lpfc_nodelist *) mb->context2; 15303 mb->context2 = NULL; 15304 if (ndlp) { 15305 spin_lock(shost->host_lock); 15306 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; 15307 spin_unlock(shost->host_lock); 15308 lpfc_nlp_put(ndlp); 15309 } 15310 } 15311 mempool_free(mb, phba->mbox_mem_pool); 15312 } 15313 15314 /* Release the ndlp with the cleaned-up active mailbox command */ 15315 if (act_mbx_ndlp) { 15316 spin_lock(shost->host_lock); 15317 act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; 15318 spin_unlock(shost->host_lock); 15319 lpfc_nlp_put(act_mbx_ndlp); 15320 } 15321 } 15322 15323 /** 15324 * lpfc_drain_txq - Drain the txq 15325 * @phba: Pointer to HBA context object. 15326 * 15327 * This function attempt to submit IOCBs on the txq 15328 * to the adapter. For SLI4 adapters, the txq contains 15329 * ELS IOCBs that have been deferred because the there 15330 * are no SGLs. This congestion can occur with large 15331 * vport counts during node discovery. 15332 **/ 15333 15334 uint32_t 15335 lpfc_drain_txq(struct lpfc_hba *phba) 15336 { 15337 LIST_HEAD(completions); 15338 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 15339 struct lpfc_iocbq *piocbq = 0; 15340 unsigned long iflags = 0; 15341 char *fail_msg = NULL; 15342 struct lpfc_sglq *sglq; 15343 union lpfc_wqe wqe; 15344 15345 spin_lock_irqsave(&phba->hbalock, iflags); 15346 if (pring->txq_cnt > pring->txq_max) 15347 pring->txq_max = pring->txq_cnt; 15348 15349 spin_unlock_irqrestore(&phba->hbalock, iflags); 15350 15351 while (pring->txq_cnt) { 15352 spin_lock_irqsave(&phba->hbalock, iflags); 15353 15354 piocbq = lpfc_sli_ringtx_get(phba, pring); 15355 sglq = __lpfc_sli_get_sglq(phba, piocbq); 15356 if (!sglq) { 15357 __lpfc_sli_ringtx_put(phba, pring, piocbq); 15358 spin_unlock_irqrestore(&phba->hbalock, iflags); 15359 break; 15360 } else { 15361 if (!piocbq) { 15362 /* The txq_cnt out of sync. This should 15363 * never happen 15364 */ 15365 sglq = __lpfc_clear_active_sglq(phba, 15366 sglq->sli4_lxritag); 15367 spin_unlock_irqrestore(&phba->hbalock, iflags); 15368 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15369 "2823 txq empty and txq_cnt is %d\n ", 15370 pring->txq_cnt); 15371 break; 15372 } 15373 } 15374 15375 /* The xri and iocb resources secured, 15376 * attempt to issue request 15377 */ 15378 piocbq->sli4_lxritag = sglq->sli4_lxritag; 15379 piocbq->sli4_xritag = sglq->sli4_xritag; 15380 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq)) 15381 fail_msg = "to convert bpl to sgl"; 15382 else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe)) 15383 fail_msg = "to convert iocb to wqe"; 15384 else if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe)) 15385 fail_msg = " - Wq is full"; 15386 else 15387 lpfc_sli_ringtxcmpl_put(phba, pring, piocbq); 15388 15389 if (fail_msg) { 15390 /* Failed means we can't issue and need to cancel */ 15391 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15392 "2822 IOCB failed %s iotag 0x%x " 15393 "xri 0x%x\n", 15394 fail_msg, 15395 piocbq->iotag, piocbq->sli4_xritag); 15396 list_add_tail(&piocbq->list, &completions); 15397 } 15398 spin_unlock_irqrestore(&phba->hbalock, iflags); 15399 } 15400 15401 /* Cancel all the IOCBs that cannot be issued */ 15402 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 15403 IOERR_SLI_ABORTED); 15404 15405 return pring->txq_cnt; 15406 } 15407