/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/aer.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"

/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB,
	LPFC_UNSOL_IOCB,
	LPFC_SOL_IOCB,
	LPFC_ABORT_IOCB
} lpfc_iocb_type;


/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
				  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
			      uint8_t *, uint32_t *);
static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
							 struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
				      struct hbq_dmabuf *);
static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
	return &iocbq->iocb;
}

/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work Queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful. If no entries are available on @q then this function will return
 * -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
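 *
 * A minimal usage sketch (illustrative only, not taken from this driver;
 * the work queue and the WQE contents are assumed to have been set up by
 * the caller, which holds the hbalock):
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	rc = lpfc_sli4_wq_put(wq, &wqe);
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 *
 * A return of -ENOMEM means the WQ was full; the caller must retry or
 * queue the request for later.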
 **/
static uint32_t
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
{
	union lpfc_wqe *temp_wqe = q->qe[q->host_index].wqe;
	struct lpfc_register doorbell;
	uint32_t host_index;

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	/* set consumption flag every once in a while */
	if (!((q->host_index + 1) % LPFC_RELEASE_NOTIFICATION_INTERVAL))
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);

	lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);

	/* Update the host index before invoking device */
	host_index = q->host_index;
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_wq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_wq_doorbell_index, &doorbell, host_index);
	bf_set(lpfc_wq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.WQDBregaddr);
	readl(q->phba->sli4_hba.WQDBregaddr); /* Flush */

	return 0;
}

/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
	uint32_t released = 0;

	if (q->hba_index == index)
		return 0;
	do {
		q->hba_index = ((q->hba_index + 1) % q->entry_count);
		released++;
	} while (q->hba_index != index);
	return released;
}

/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the Mailbox Queue Entry. This function returns
 * 0 if successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static uint32_t
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
	struct lpfc_mqe *temp_mqe = q->qe[q->host_index].mqe;
	struct lpfc_register doorbell;
	uint32_t host_index;

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
	/* Save off the mailbox pointer for completion */
	q->phba->mbox = (MAILBOX_t *)temp_mqe;

	/* Update the host index before invoking device */
	host_index = q->host_index;
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
	readl(q->phba->sli4_hba.MQDBregaddr); /* Flush */
	return 0;
}

/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
	/* Clear the mailbox pointer for completion */
	q->phba->mbox = NULL;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return 1;
}

/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA then this routine will return
 * NULL.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
	struct lpfc_eqe *eqe = q->qe[q->hba_index].eqe;

	/* If the next EQE is not valid then we are done */
	if (!bf_get_le32(lpfc_eqe_valid, eqe))
		return NULL;
	/* If the host has not yet processed the next entry then we are done */
	if (((q->hba_index + 1) % q->entry_count) == q->host_index)
		return NULL;

	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return eqe;
}

/**
 * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
 * @q: The Event Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will mark all Event Queue Entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each event queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
 * The internal host index in the @q will be updated by this routine to
 * indicate that the host has finished processing the entries. The @arm
 * parameter indicates that the queue should be rearmed when ringing the
 * doorbell.
 *
 * This function will return the number of EQEs that were popped.
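 *
 * Typical use pairs this routine with lpfc_sli4_eq_get() (a hedged sketch,
 * not lifted from the driver; the per-entry handler is an assumption):
 *
 *	while ((eqe = lpfc_sli4_eq_get(eq)) != NULL)
 *		handle_eqe(eqe);	// hypothetical per-EQE handler
 *	lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);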
 **/
uint32_t
lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_eqe *temp_eqe;
	struct lpfc_register doorbell;

	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		temp_eqe = q->qe[q->host_index].eqe;
		bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm) {
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	}
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQCQDBregaddr);
	return released;
}

/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q, update
 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 * processed, but not popped back to the HBA then this routine will return
 * NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
	struct lpfc_cqe *cqe;

	/* If the next CQE is not valid then we are done */
	if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
		return NULL;
	/* If the host has not yet processed the next entry then we are done */
	if (((q->hba_index + 1) % q->entry_count) == q->host_index)
		return NULL;

	cqe = q->qe[q->hba_index].cqe;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return cqe;
}

/**
 * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will mark all Completion queue entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each completion queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
 * The internal host index in the @q will be updated by this routine to
 * indicate that the host has finished processing the entries. The @arm
 * parameter indicates that the queue should be rearmed when ringing the
 * doorbell.
 *
 * This function will return the number of CQEs that were released.
 **/
uint32_t
lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_cqe *temp_qe;
	struct lpfc_register doorbell;

	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		temp_qe = q->qe[q->host_index].cqe;
		bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
	bf_set(lpfc_eqcq_doorbell_cqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
	return released;
}

/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The header Receive Queue Entry to put on the header Receive Queue.
 * @drqe: The data Receive Queue Entry to put on the data Receive Queue.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next
 * available entries on @hq and @dq. This function will then ring the Receive
 * Queue Doorbell to signal the HBA to start processing the Receive Queue
 * Entries. This function returns the index that the rqe was copied to if
 * successful. If no entries are available on @hq then this function will
 * return -EBUSY.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
	struct lpfc_rqe *temp_hrqe = hq->qe[hq->host_index].rqe;
	struct lpfc_rqe *temp_drqe = dq->qe[dq->host_index].rqe;
	struct lpfc_register doorbell;
	int put_index = hq->host_index;

	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
		return -EINVAL;
	if (hq->host_index != dq->host_index)
		return -EINVAL;
	/* If the host has not yet processed the next entry then we are done */
	if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index)
		return -EBUSY;
	lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
	lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

	/* Update the host index to point to the next slot */
	hq->host_index = ((hq->host_index + 1) % hq->entry_count);
	dq->host_index = ((dq->host_index + 1) % dq->entry_count);

	/* Ring The Header Receive Queue Doorbell */
	if (!(hq->host_index % LPFC_RQ_POST_BATCH)) {
		doorbell.word0 = 0;
		bf_set(lpfc_rq_doorbell_num_posted, &doorbell,
		       LPFC_RQ_POST_BATCH);
		bf_set(lpfc_rq_doorbell_id, &doorbell, hq->queue_id);
		writel(doorbell.word0, hq->phba->sli4_hba.RQDBregaddr);
	}
	return put_index;
}

/**
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
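 *
 * A hedged sketch of the put/release pairing (illustrative only; the
 * addresses in hrqe and drqe are assumed to have been filled in by the
 * caller, which holds the hbalock for the put):
 *
 *	rc = lpfc_sli4_rq_put(hq, dq, &hrqe, &drqe);
 *	if (rc >= 0)
 *		hbq_buf->tag = rc;	// rc is the index the pair landed on
 *	...
 *	lpfc_sli4_rq_release(hq, dq);	// once the HBA consumes an entry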
 **/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
	if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
		return 0;
	hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
	dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
	return 1;
}

/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads from consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->cmdringaddr) +
			   pring->cmdidx * phba->iocb_cmd_size);
}

/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->rspringaddr) +
			   pring->rspidx * phba->iocb_rsp_size);
}

/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
static struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq *iocbq = NULL;

	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);

	if (iocbq)
		phba->iocb_cnt++;
	if (phba->iocb_cnt > phba->iocb_max)
		phba->iocb_max = phba->iocb_cnt;
	return iocbq;
}

/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
static struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	uint16_t adj_xri;
	struct lpfc_sglq *sglq;

	adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
	if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
		return NULL;
	sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
	phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = NULL;
	return sglq;
}

/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	uint16_t adj_xri;
	struct lpfc_sglq *sglq;

	adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
	if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
		return NULL;
	sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
	return sglq;
}

/**
 * __lpfc_sli_get_sglq - Allocates a sglq object from the sgl pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. It gets a new driver
 * sglq object from the sglq list. If the list is not empty, it removes
 * the first entry, marks it active, and returns a pointer to that sglq
 * object; otherwise it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_sglq(struct lpfc_hba *phba)
{
	struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list;
	struct lpfc_sglq *sglq = NULL;
	uint16_t adj_xri;

	list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list);
	if (!sglq)
		return NULL;
	adj_xri = sglq->sli4_xritag - phba->sli4_hba.max_cfg_param.xri_base;
	phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq;
	sglq->state = SGL_ALLOCATED;
	return sglq;
}

/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	iocbq = __lpfc_sli_get_iocbq(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return iocbq;
}

/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release a driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sglq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_sgl_list).
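 *
 * The usual allocate/release pairing, from a caller's point of view (a
 * minimal sketch, not taken from the driver; the command build step is
 * elided):
 *
 *	iocbq = lpfc_sli_get_iocbq(phba);
 *	if (!iocbq)
 *		return -ENOMEM;
 *	// ... build and issue the command ...
 *	lpfc_sli_release_iocbq(phba, iocbq);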
577 **/ 578 static void 579 __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 580 { 581 struct lpfc_sglq *sglq; 582 size_t start_clean = offsetof(struct lpfc_iocbq, iocb); 583 unsigned long iflag = 0; 584 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 585 586 if (iocbq->sli4_xritag == NO_XRI) 587 sglq = NULL; 588 else 589 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag); 590 if (sglq) { 591 if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) && 592 (sglq->state != SGL_XRI_ABORTED)) { 593 spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock, 594 iflag); 595 list_add(&sglq->list, 596 &phba->sli4_hba.lpfc_abts_els_sgl_list); 597 spin_unlock_irqrestore( 598 &phba->sli4_hba.abts_sgl_list_lock, iflag); 599 } else { 600 sglq->state = SGL_FREED; 601 list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list); 602 603 /* Check if TXQ queue needs to be serviced */ 604 if (pring->txq_cnt) 605 lpfc_worker_wake_up(phba); 606 } 607 } 608 609 610 /* 611 * Clean all volatile data fields, preserve iotag and node struct. 612 */ 613 memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean); 614 iocbq->sli4_xritag = NO_XRI; 615 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); 616 } 617 618 619 /** 620 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool 621 * @phba: Pointer to HBA context object. 622 * @iocbq: Pointer to driver iocb object. 623 * 624 * This function is called with hbalock held to release driver 625 * iocb object to the iocb pool. The iotag in the iocb object 626 * does not change for each use of the iocb object. This function 627 * clears all other fields of the iocb object when it is freed. 628 **/ 629 static void 630 __lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 631 { 632 size_t start_clean = offsetof(struct lpfc_iocbq, iocb); 633 634 /* 635 * Clean all volatile data fields, preserve iotag and node struct. 636 */ 637 memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean); 638 iocbq->sli4_xritag = NO_XRI; 639 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); 640 } 641 642 /** 643 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool 644 * @phba: Pointer to HBA context object. 645 * @iocbq: Pointer to driver iocb object. 646 * 647 * This function is called with hbalock held to release driver 648 * iocb object to the iocb pool. The iotag in the iocb object 649 * does not change for each use of the iocb object. This function 650 * clears all other fields of the iocb object when it is freed. 651 **/ 652 static void 653 __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 654 { 655 phba->__lpfc_sli_release_iocbq(phba, iocbq); 656 phba->iocb_cnt--; 657 } 658 659 /** 660 * lpfc_sli_release_iocbq - Release iocb to the iocb pool 661 * @phba: Pointer to HBA context object. 662 * @iocbq: Pointer to driver iocb object. 663 * 664 * This function is called with no lock held to release the iocb to 665 * iocb pool. 666 **/ 667 void 668 lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 669 { 670 unsigned long iflags; 671 672 /* 673 * Clean all volatile data fields, preserve iotag and node struct. 674 */ 675 spin_lock_irqsave(&phba->hbalock, iflags); 676 __lpfc_sli_release_iocbq(phba, iocbq); 677 spin_unlock_irqrestore(&phba->hbalock, iflags); 678 } 679 680 /** 681 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list. 682 * @phba: Pointer to HBA context object. 683 * @iocblist: List of IOCBs. 684 * @ulpstatus: ULP status in IOCB command field. 
685 * @ulpWord4: ULP word-4 in IOCB command field. 686 * 687 * This function is called with a list of IOCBs to cancel. It cancels the IOCB 688 * on the list by invoking the complete callback function associated with the 689 * IOCB with the provided @ulpstatus and @ulpword4 set to the IOCB commond 690 * fields. 691 **/ 692 void 693 lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist, 694 uint32_t ulpstatus, uint32_t ulpWord4) 695 { 696 struct lpfc_iocbq *piocb; 697 698 while (!list_empty(iocblist)) { 699 list_remove_head(iocblist, piocb, struct lpfc_iocbq, list); 700 701 if (!piocb->iocb_cmpl) 702 lpfc_sli_release_iocbq(phba, piocb); 703 else { 704 piocb->iocb.ulpStatus = ulpstatus; 705 piocb->iocb.un.ulpWord[4] = ulpWord4; 706 (piocb->iocb_cmpl) (phba, piocb, piocb); 707 } 708 } 709 return; 710 } 711 712 /** 713 * lpfc_sli_iocb_cmd_type - Get the iocb type 714 * @iocb_cmnd: iocb command code. 715 * 716 * This function is called by ring event handler function to get the iocb type. 717 * This function translates the iocb command to an iocb command type used to 718 * decide the final disposition of each completed IOCB. 719 * The function returns 720 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb 721 * LPFC_SOL_IOCB if it is a solicited iocb completion 722 * LPFC_ABORT_IOCB if it is an abort iocb 723 * LPFC_UNSOL_IOCB if it is an unsolicited iocb 724 * 725 * The caller is not required to hold any lock. 726 **/ 727 static lpfc_iocb_type 728 lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd) 729 { 730 lpfc_iocb_type type = LPFC_UNKNOWN_IOCB; 731 732 if (iocb_cmnd > CMD_MAX_IOCB_CMD) 733 return 0; 734 735 switch (iocb_cmnd) { 736 case CMD_XMIT_SEQUENCE_CR: 737 case CMD_XMIT_SEQUENCE_CX: 738 case CMD_XMIT_BCAST_CN: 739 case CMD_XMIT_BCAST_CX: 740 case CMD_ELS_REQUEST_CR: 741 case CMD_ELS_REQUEST_CX: 742 case CMD_CREATE_XRI_CR: 743 case CMD_CREATE_XRI_CX: 744 case CMD_GET_RPI_CN: 745 case CMD_XMIT_ELS_RSP_CX: 746 case CMD_GET_RPI_CR: 747 case CMD_FCP_IWRITE_CR: 748 case CMD_FCP_IWRITE_CX: 749 case CMD_FCP_IREAD_CR: 750 case CMD_FCP_IREAD_CX: 751 case CMD_FCP_ICMND_CR: 752 case CMD_FCP_ICMND_CX: 753 case CMD_FCP_TSEND_CX: 754 case CMD_FCP_TRSP_CX: 755 case CMD_FCP_TRECEIVE_CX: 756 case CMD_FCP_AUTO_TRSP_CX: 757 case CMD_ADAPTER_MSG: 758 case CMD_ADAPTER_DUMP: 759 case CMD_XMIT_SEQUENCE64_CR: 760 case CMD_XMIT_SEQUENCE64_CX: 761 case CMD_XMIT_BCAST64_CN: 762 case CMD_XMIT_BCAST64_CX: 763 case CMD_ELS_REQUEST64_CR: 764 case CMD_ELS_REQUEST64_CX: 765 case CMD_FCP_IWRITE64_CR: 766 case CMD_FCP_IWRITE64_CX: 767 case CMD_FCP_IREAD64_CR: 768 case CMD_FCP_IREAD64_CX: 769 case CMD_FCP_ICMND64_CR: 770 case CMD_FCP_ICMND64_CX: 771 case CMD_FCP_TSEND64_CX: 772 case CMD_FCP_TRSP64_CX: 773 case CMD_FCP_TRECEIVE64_CX: 774 case CMD_GEN_REQUEST64_CR: 775 case CMD_GEN_REQUEST64_CX: 776 case CMD_XMIT_ELS_RSP64_CX: 777 case DSSCMD_IWRITE64_CR: 778 case DSSCMD_IWRITE64_CX: 779 case DSSCMD_IREAD64_CR: 780 case DSSCMD_IREAD64_CX: 781 type = LPFC_SOL_IOCB; 782 break; 783 case CMD_ABORT_XRI_CN: 784 case CMD_ABORT_XRI_CX: 785 case CMD_CLOSE_XRI_CN: 786 case CMD_CLOSE_XRI_CX: 787 case CMD_XRI_ABORTED_CX: 788 case CMD_ABORT_MXRI64_CN: 789 case CMD_XMIT_BLS_RSP64_CX: 790 type = LPFC_ABORT_IOCB; 791 break; 792 case CMD_RCV_SEQUENCE_CX: 793 case CMD_RCV_ELS_REQ_CX: 794 case CMD_RCV_SEQUENCE64_CX: 795 case CMD_RCV_ELS_REQ64_CX: 796 case CMD_ASYNC_STATUS: 797 case CMD_IOCB_RCV_SEQ64_CX: 798 case CMD_IOCB_RCV_ELS64_CX: 799 case CMD_IOCB_RCV_CONT64_CX: 800 case CMD_IOCB_RET_XRI64_CX: 801 type = LPFC_UNSOL_IOCB; 802 
break; 803 case CMD_IOCB_XMIT_MSEQ64_CR: 804 case CMD_IOCB_XMIT_MSEQ64_CX: 805 case CMD_IOCB_RCV_SEQ_LIST64_CX: 806 case CMD_IOCB_RCV_ELS_LIST64_CX: 807 case CMD_IOCB_CLOSE_EXTENDED_CN: 808 case CMD_IOCB_ABORT_EXTENDED_CN: 809 case CMD_IOCB_RET_HBQE64_CN: 810 case CMD_IOCB_FCP_IBIDIR64_CR: 811 case CMD_IOCB_FCP_IBIDIR64_CX: 812 case CMD_IOCB_FCP_ITASKMGT64_CX: 813 case CMD_IOCB_LOGENTRY_CN: 814 case CMD_IOCB_LOGENTRY_ASYNC_CN: 815 printk("%s - Unhandled SLI-3 Command x%x\n", 816 __func__, iocb_cmnd); 817 type = LPFC_UNKNOWN_IOCB; 818 break; 819 default: 820 type = LPFC_UNKNOWN_IOCB; 821 break; 822 } 823 824 return type; 825 } 826 827 /** 828 * lpfc_sli_ring_map - Issue config_ring mbox for all rings 829 * @phba: Pointer to HBA context object. 830 * 831 * This function is called from SLI initialization code 832 * to configure every ring of the HBA's SLI interface. The 833 * caller is not required to hold any lock. This function issues 834 * a config_ring mailbox command for each ring. 835 * This function returns zero if successful else returns a negative 836 * error code. 837 **/ 838 static int 839 lpfc_sli_ring_map(struct lpfc_hba *phba) 840 { 841 struct lpfc_sli *psli = &phba->sli; 842 LPFC_MBOXQ_t *pmb; 843 MAILBOX_t *pmbox; 844 int i, rc, ret = 0; 845 846 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 847 if (!pmb) 848 return -ENOMEM; 849 pmbox = &pmb->u.mb; 850 phba->link_state = LPFC_INIT_MBX_CMDS; 851 for (i = 0; i < psli->num_rings; i++) { 852 lpfc_config_ring(phba, i, pmb); 853 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 854 if (rc != MBX_SUCCESS) { 855 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 856 "0446 Adapter failed to init (%d), " 857 "mbxCmd x%x CFG_RING, mbxStatus x%x, " 858 "ring %d\n", 859 rc, pmbox->mbxCommand, 860 pmbox->mbxStatus, i); 861 phba->link_state = LPFC_HBA_ERROR; 862 ret = -ENXIO; 863 break; 864 } 865 } 866 mempool_free(pmb, phba->mbox_mem_pool); 867 return ret; 868 } 869 870 /** 871 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq 872 * @phba: Pointer to HBA context object. 873 * @pring: Pointer to driver SLI ring object. 874 * @piocb: Pointer to the driver iocb object. 875 * 876 * This function is called with hbalock held. The function adds the 877 * new iocb to txcmplq of the given ring. This function always returns 878 * 0. If this function is called for ELS ring, this function checks if 879 * there is a vport associated with the ELS command. This function also 880 * starts els_tmofunc timer if this is an ELS command. 881 **/ 882 static int 883 lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 884 struct lpfc_iocbq *piocb) 885 { 886 list_add_tail(&piocb->list, &pring->txcmplq); 887 piocb->iocb_flag |= LPFC_IO_ON_Q; 888 pring->txcmplq_cnt++; 889 if (pring->txcmplq_cnt > pring->txcmplq_max) 890 pring->txcmplq_max = pring->txcmplq_cnt; 891 892 if ((unlikely(pring->ringno == LPFC_ELS_RING)) && 893 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && 894 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) { 895 if (!piocb->vport) 896 BUG(); 897 else 898 mod_timer(&piocb->vport->els_tmofunc, 899 jiffies + HZ * (phba->fc_ratov << 1)); 900 } 901 902 903 return 0; 904 } 905 906 /** 907 * lpfc_sli_ringtx_get - Get first element of the txq 908 * @phba: Pointer to HBA context object. 909 * @pring: Pointer to driver SLI ring object. 910 * 911 * This function is called with hbalock held to get next 912 * iocb in txq of the given ring. 
If there is any iocb in 913 * the txq, the function returns first iocb in the list after 914 * removing the iocb from the list, else it returns NULL. 915 **/ 916 struct lpfc_iocbq * 917 lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 918 { 919 struct lpfc_iocbq *cmd_iocb; 920 921 list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list); 922 if (cmd_iocb != NULL) 923 pring->txq_cnt--; 924 return cmd_iocb; 925 } 926 927 /** 928 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring 929 * @phba: Pointer to HBA context object. 930 * @pring: Pointer to driver SLI ring object. 931 * 932 * This function is called with hbalock held and the caller must post the 933 * iocb without releasing the lock. If the caller releases the lock, 934 * iocb slot returned by the function is not guaranteed to be available. 935 * The function returns pointer to the next available iocb slot if there 936 * is available slot in the ring, else it returns NULL. 937 * If the get index of the ring is ahead of the put index, the function 938 * will post an error attention event to the worker thread to take the 939 * HBA to offline state. 940 **/ 941 static IOCB_t * 942 lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 943 { 944 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; 945 uint32_t max_cmd_idx = pring->numCiocb; 946 if ((pring->next_cmdidx == pring->cmdidx) && 947 (++pring->next_cmdidx >= max_cmd_idx)) 948 pring->next_cmdidx = 0; 949 950 if (unlikely(pring->local_getidx == pring->next_cmdidx)) { 951 952 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx); 953 954 if (unlikely(pring->local_getidx >= max_cmd_idx)) { 955 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 956 "0315 Ring %d issue: portCmdGet %d " 957 "is bigger than cmd ring %d\n", 958 pring->ringno, 959 pring->local_getidx, max_cmd_idx); 960 961 phba->link_state = LPFC_HBA_ERROR; 962 /* 963 * All error attention handlers are posted to 964 * worker thread 965 */ 966 phba->work_ha |= HA_ERATT; 967 phba->work_hs = HS_FFER3; 968 969 lpfc_worker_wake_up(phba); 970 971 return NULL; 972 } 973 974 if (pring->local_getidx == pring->next_cmdidx) 975 return NULL; 976 } 977 978 return lpfc_cmd_iocb(phba, pring); 979 } 980 981 /** 982 * lpfc_sli_next_iotag - Get an iotag for the iocb 983 * @phba: Pointer to HBA context object. 984 * @iocbq: Pointer to driver iocb object. 985 * 986 * This function gets an iotag for the iocb. If there is no unused iotag and 987 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup 988 * array and assigns a new iotag. 989 * The function returns the allocated iotag if successful, else returns zero. 990 * Zero is not a valid iotag. 991 * The caller is not required to hold any lock. 
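 *
 * A hedged usage sketch (illustrative only; the error path is a placeholder):
 *
 *	iotag = lpfc_sli_next_iotag(phba, iocbq);
 *	if (!iotag)
 *		goto out_free_iocbq;	// hypothetical caller error path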
992 **/ 993 uint16_t 994 lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 995 { 996 struct lpfc_iocbq **new_arr; 997 struct lpfc_iocbq **old_arr; 998 size_t new_len; 999 struct lpfc_sli *psli = &phba->sli; 1000 uint16_t iotag; 1001 1002 spin_lock_irq(&phba->hbalock); 1003 iotag = psli->last_iotag; 1004 if(++iotag < psli->iocbq_lookup_len) { 1005 psli->last_iotag = iotag; 1006 psli->iocbq_lookup[iotag] = iocbq; 1007 spin_unlock_irq(&phba->hbalock); 1008 iocbq->iotag = iotag; 1009 return iotag; 1010 } else if (psli->iocbq_lookup_len < (0xffff 1011 - LPFC_IOCBQ_LOOKUP_INCREMENT)) { 1012 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT; 1013 spin_unlock_irq(&phba->hbalock); 1014 new_arr = kzalloc(new_len * sizeof (struct lpfc_iocbq *), 1015 GFP_KERNEL); 1016 if (new_arr) { 1017 spin_lock_irq(&phba->hbalock); 1018 old_arr = psli->iocbq_lookup; 1019 if (new_len <= psli->iocbq_lookup_len) { 1020 /* highly unprobable case */ 1021 kfree(new_arr); 1022 iotag = psli->last_iotag; 1023 if(++iotag < psli->iocbq_lookup_len) { 1024 psli->last_iotag = iotag; 1025 psli->iocbq_lookup[iotag] = iocbq; 1026 spin_unlock_irq(&phba->hbalock); 1027 iocbq->iotag = iotag; 1028 return iotag; 1029 } 1030 spin_unlock_irq(&phba->hbalock); 1031 return 0; 1032 } 1033 if (psli->iocbq_lookup) 1034 memcpy(new_arr, old_arr, 1035 ((psli->last_iotag + 1) * 1036 sizeof (struct lpfc_iocbq *))); 1037 psli->iocbq_lookup = new_arr; 1038 psli->iocbq_lookup_len = new_len; 1039 psli->last_iotag = iotag; 1040 psli->iocbq_lookup[iotag] = iocbq; 1041 spin_unlock_irq(&phba->hbalock); 1042 iocbq->iotag = iotag; 1043 kfree(old_arr); 1044 return iotag; 1045 } 1046 } else 1047 spin_unlock_irq(&phba->hbalock); 1048 1049 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 1050 "0318 Failed to allocate IOTAG.last IOTAG is %d\n", 1051 psli->last_iotag); 1052 1053 return 0; 1054 } 1055 1056 /** 1057 * lpfc_sli_submit_iocb - Submit an iocb to the firmware 1058 * @phba: Pointer to HBA context object. 1059 * @pring: Pointer to driver SLI ring object. 1060 * @iocb: Pointer to iocb slot in the ring. 1061 * @nextiocb: Pointer to driver iocb object which need to be 1062 * posted to firmware. 1063 * 1064 * This function is called with hbalock held to post a new iocb to 1065 * the firmware. This function copies the new iocb to ring iocb slot and 1066 * updates the ring pointers. It adds the new iocb to txcmplq if there is 1067 * a completion call back for this iocb else the function will free the 1068 * iocb object. 1069 **/ 1070 static void 1071 lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 1072 IOCB_t *iocb, struct lpfc_iocbq *nextiocb) 1073 { 1074 /* 1075 * Set up an iotag 1076 */ 1077 nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0; 1078 1079 1080 if (pring->ringno == LPFC_ELS_RING) { 1081 lpfc_debugfs_slow_ring_trc(phba, 1082 "IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x", 1083 *(((uint32_t *) &nextiocb->iocb) + 4), 1084 *(((uint32_t *) &nextiocb->iocb) + 6), 1085 *(((uint32_t *) &nextiocb->iocb) + 7)); 1086 } 1087 1088 /* 1089 * Issue iocb command to adapter 1090 */ 1091 lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size); 1092 wmb(); 1093 pring->stats.iocb_cmd++; 1094 1095 /* 1096 * If there is no completion routine to call, we can release the 1097 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF, 1098 * that have no rsp ring completion, iocb_cmpl MUST be NULL. 
1099 */ 1100 if (nextiocb->iocb_cmpl) 1101 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb); 1102 else 1103 __lpfc_sli_release_iocbq(phba, nextiocb); 1104 1105 /* 1106 * Let the HBA know what IOCB slot will be the next one the 1107 * driver will put a command into. 1108 */ 1109 pring->cmdidx = pring->next_cmdidx; 1110 writel(pring->cmdidx, &phba->host_gp[pring->ringno].cmdPutInx); 1111 } 1112 1113 /** 1114 * lpfc_sli_update_full_ring - Update the chip attention register 1115 * @phba: Pointer to HBA context object. 1116 * @pring: Pointer to driver SLI ring object. 1117 * 1118 * The caller is not required to hold any lock for calling this function. 1119 * This function updates the chip attention bits for the ring to inform firmware 1120 * that there are pending work to be done for this ring and requests an 1121 * interrupt when there is space available in the ring. This function is 1122 * called when the driver is unable to post more iocbs to the ring due 1123 * to unavailability of space in the ring. 1124 **/ 1125 static void 1126 lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1127 { 1128 int ringno = pring->ringno; 1129 1130 pring->flag |= LPFC_CALL_RING_AVAILABLE; 1131 1132 wmb(); 1133 1134 /* 1135 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register. 1136 * The HBA will tell us when an IOCB entry is available. 1137 */ 1138 writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr); 1139 readl(phba->CAregaddr); /* flush */ 1140 1141 pring->stats.iocb_cmd_full++; 1142 } 1143 1144 /** 1145 * lpfc_sli_update_ring - Update chip attention register 1146 * @phba: Pointer to HBA context object. 1147 * @pring: Pointer to driver SLI ring object. 1148 * 1149 * This function updates the chip attention register bit for the 1150 * given ring to inform HBA that there is more work to be done 1151 * in this ring. The caller is not required to hold any lock. 1152 **/ 1153 static void 1154 lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1155 { 1156 int ringno = pring->ringno; 1157 1158 /* 1159 * Tell the HBA that there is work to do in this ring. 1160 */ 1161 if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) { 1162 wmb(); 1163 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr); 1164 readl(phba->CAregaddr); /* flush */ 1165 } 1166 } 1167 1168 /** 1169 * lpfc_sli_resume_iocb - Process iocbs in the txq 1170 * @phba: Pointer to HBA context object. 1171 * @pring: Pointer to driver SLI ring object. 1172 * 1173 * This function is called with hbalock held to post pending iocbs 1174 * in the txq to the firmware. This function is called when driver 1175 * detects space available in the ring. 1176 **/ 1177 static void 1178 lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1179 { 1180 IOCB_t *iocb; 1181 struct lpfc_iocbq *nextiocb; 1182 1183 /* 1184 * Check to see if: 1185 * (a) there is anything on the txq to send 1186 * (b) link is up 1187 * (c) link attention events can be processed (fcp ring only) 1188 * (d) IOCB processing is not blocked by the outstanding mbox command. 
1189 */ 1190 if (pring->txq_cnt && 1191 lpfc_is_link_up(phba) && 1192 (pring->ringno != phba->sli.fcp_ring || 1193 phba->sli.sli_flag & LPFC_PROCESS_LA)) { 1194 1195 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && 1196 (nextiocb = lpfc_sli_ringtx_get(phba, pring))) 1197 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb); 1198 1199 if (iocb) 1200 lpfc_sli_update_ring(phba, pring); 1201 else 1202 lpfc_sli_update_full_ring(phba, pring); 1203 } 1204 1205 return; 1206 } 1207 1208 /** 1209 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ 1210 * @phba: Pointer to HBA context object. 1211 * @hbqno: HBQ number. 1212 * 1213 * This function is called with hbalock held to get the next 1214 * available slot for the given HBQ. If there is free slot 1215 * available for the HBQ it will return pointer to the next available 1216 * HBQ entry else it will return NULL. 1217 **/ 1218 static struct lpfc_hbq_entry * 1219 lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno) 1220 { 1221 struct hbq_s *hbqp = &phba->hbqs[hbqno]; 1222 1223 if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx && 1224 ++hbqp->next_hbqPutIdx >= hbqp->entry_count) 1225 hbqp->next_hbqPutIdx = 0; 1226 1227 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) { 1228 uint32_t raw_index = phba->hbq_get[hbqno]; 1229 uint32_t getidx = le32_to_cpu(raw_index); 1230 1231 hbqp->local_hbqGetIdx = getidx; 1232 1233 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) { 1234 lpfc_printf_log(phba, KERN_ERR, 1235 LOG_SLI | LOG_VPORT, 1236 "1802 HBQ %d: local_hbqGetIdx " 1237 "%u is > than hbqp->entry_count %u\n", 1238 hbqno, hbqp->local_hbqGetIdx, 1239 hbqp->entry_count); 1240 1241 phba->link_state = LPFC_HBA_ERROR; 1242 return NULL; 1243 } 1244 1245 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx) 1246 return NULL; 1247 } 1248 1249 return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt + 1250 hbqp->hbqPutIdx; 1251 } 1252 1253 /** 1254 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers 1255 * @phba: Pointer to HBA context object. 1256 * 1257 * This function is called with no lock held to free all the 1258 * hbq buffers while uninitializing the SLI interface. It also 1259 * frees the HBQ buffers returned by the firmware but not yet 1260 * processed by the upper layers. 
1261 **/ 1262 void 1263 lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba) 1264 { 1265 struct lpfc_dmabuf *dmabuf, *next_dmabuf; 1266 struct hbq_dmabuf *hbq_buf; 1267 unsigned long flags; 1268 int i, hbq_count; 1269 uint32_t hbqno; 1270 1271 hbq_count = lpfc_sli_hbq_count(); 1272 /* Return all memory used by all HBQs */ 1273 spin_lock_irqsave(&phba->hbalock, flags); 1274 for (i = 0; i < hbq_count; ++i) { 1275 list_for_each_entry_safe(dmabuf, next_dmabuf, 1276 &phba->hbqs[i].hbq_buffer_list, list) { 1277 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf); 1278 list_del(&hbq_buf->dbuf.list); 1279 (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf); 1280 } 1281 phba->hbqs[i].buffer_count = 0; 1282 } 1283 /* Return all HBQ buffer that are in-fly */ 1284 list_for_each_entry_safe(dmabuf, next_dmabuf, &phba->rb_pend_list, 1285 list) { 1286 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf); 1287 list_del(&hbq_buf->dbuf.list); 1288 if (hbq_buf->tag == -1) { 1289 (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer) 1290 (phba, hbq_buf); 1291 } else { 1292 hbqno = hbq_buf->tag >> 16; 1293 if (hbqno >= LPFC_MAX_HBQS) 1294 (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer) 1295 (phba, hbq_buf); 1296 else 1297 (phba->hbqs[hbqno].hbq_free_buffer)(phba, 1298 hbq_buf); 1299 } 1300 } 1301 1302 /* Mark the HBQs not in use */ 1303 phba->hbq_in_use = 0; 1304 spin_unlock_irqrestore(&phba->hbalock, flags); 1305 } 1306 1307 /** 1308 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware 1309 * @phba: Pointer to HBA context object. 1310 * @hbqno: HBQ number. 1311 * @hbq_buf: Pointer to HBQ buffer. 1312 * 1313 * This function is called with the hbalock held to post a 1314 * hbq buffer to the firmware. If the function finds an empty 1315 * slot in the HBQ, it will post the buffer. The function will return 1316 * pointer to the hbq entry if it successfully post the buffer 1317 * else it will return NULL. 1318 **/ 1319 static int 1320 lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno, 1321 struct hbq_dmabuf *hbq_buf) 1322 { 1323 return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf); 1324 } 1325 1326 /** 1327 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware 1328 * @phba: Pointer to HBA context object. 1329 * @hbqno: HBQ number. 1330 * @hbq_buf: Pointer to HBQ buffer. 1331 * 1332 * This function is called with the hbalock held to post a hbq buffer to the 1333 * firmware. If the function finds an empty slot in the HBQ, it will post the 1334 * buffer and place it on the hbq_buffer_list. The function will return zero if 1335 * it successfully post the buffer else it will return an error. 
1336 **/ 1337 static int 1338 lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno, 1339 struct hbq_dmabuf *hbq_buf) 1340 { 1341 struct lpfc_hbq_entry *hbqe; 1342 dma_addr_t physaddr = hbq_buf->dbuf.phys; 1343 1344 /* Get next HBQ entry slot to use */ 1345 hbqe = lpfc_sli_next_hbq_slot(phba, hbqno); 1346 if (hbqe) { 1347 struct hbq_s *hbqp = &phba->hbqs[hbqno]; 1348 1349 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr)); 1350 hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr)); 1351 hbqe->bde.tus.f.bdeSize = hbq_buf->size; 1352 hbqe->bde.tus.f.bdeFlags = 0; 1353 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w); 1354 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag); 1355 /* Sync SLIM */ 1356 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx; 1357 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno); 1358 /* flush */ 1359 readl(phba->hbq_put + hbqno); 1360 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list); 1361 return 0; 1362 } else 1363 return -ENOMEM; 1364 } 1365 1366 /** 1367 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware 1368 * @phba: Pointer to HBA context object. 1369 * @hbqno: HBQ number. 1370 * @hbq_buf: Pointer to HBQ buffer. 1371 * 1372 * This function is called with the hbalock held to post an RQE to the SLI4 1373 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to 1374 * the hbq_buffer_list and return zero, otherwise it will return an error. 1375 **/ 1376 static int 1377 lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno, 1378 struct hbq_dmabuf *hbq_buf) 1379 { 1380 int rc; 1381 struct lpfc_rqe hrqe; 1382 struct lpfc_rqe drqe; 1383 1384 hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys); 1385 hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys); 1386 drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys); 1387 drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys); 1388 rc = lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, 1389 &hrqe, &drqe); 1390 if (rc < 0) 1391 return rc; 1392 hbq_buf->tag = rc; 1393 list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list); 1394 return 0; 1395 } 1396 1397 /* HBQ for ELS and CT traffic. */ 1398 static struct lpfc_hbq_init lpfc_els_hbq = { 1399 .rn = 1, 1400 .entry_count = 256, 1401 .mask_count = 0, 1402 .profile = 0, 1403 .ring_mask = (1 << LPFC_ELS_RING), 1404 .buffer_count = 0, 1405 .init_count = 40, 1406 .add_count = 40, 1407 }; 1408 1409 /* HBQ for the extra ring if needed */ 1410 static struct lpfc_hbq_init lpfc_extra_hbq = { 1411 .rn = 1, 1412 .entry_count = 200, 1413 .mask_count = 0, 1414 .profile = 0, 1415 .ring_mask = (1 << LPFC_EXTRA_RING), 1416 .buffer_count = 0, 1417 .init_count = 0, 1418 .add_count = 5, 1419 }; 1420 1421 /* Array of HBQs */ 1422 struct lpfc_hbq_init *lpfc_hbq_defs[] = { 1423 &lpfc_els_hbq, 1424 &lpfc_extra_hbq, 1425 }; 1426 1427 /** 1428 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ 1429 * @phba: Pointer to HBA context object. 1430 * @hbqno: HBQ number. 1431 * @count: Number of HBQ buffers to be posted. 1432 * 1433 * This function is called with no lock held to post more hbq buffers to the 1434 * given HBQ. The function returns the number of HBQ buffers successfully 1435 * posted. 
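 *
 * A hedged sketch of how a caller typically tops up an HBQ (illustrative
 * only; qno would be LPFC_ELS_HBQ or the extra-ring HBQ index):
 *
 *	posted = lpfc_sli_hbqbuf_add_hbqs(phba, qno);
 *
 * A return of zero means nothing was posted (SLI4 port, HBQ already full,
 * or buffer allocation failure).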
1436 **/ 1437 static int 1438 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count) 1439 { 1440 uint32_t i, posted = 0; 1441 unsigned long flags; 1442 struct hbq_dmabuf *hbq_buffer; 1443 LIST_HEAD(hbq_buf_list); 1444 if (!phba->hbqs[hbqno].hbq_alloc_buffer) 1445 return 0; 1446 1447 if ((phba->hbqs[hbqno].buffer_count + count) > 1448 lpfc_hbq_defs[hbqno]->entry_count) 1449 count = lpfc_hbq_defs[hbqno]->entry_count - 1450 phba->hbqs[hbqno].buffer_count; 1451 if (!count) 1452 return 0; 1453 /* Allocate HBQ entries */ 1454 for (i = 0; i < count; i++) { 1455 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba); 1456 if (!hbq_buffer) 1457 break; 1458 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list); 1459 } 1460 /* Check whether HBQ is still in use */ 1461 spin_lock_irqsave(&phba->hbalock, flags); 1462 if (!phba->hbq_in_use) 1463 goto err; 1464 while (!list_empty(&hbq_buf_list)) { 1465 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf, 1466 dbuf.list); 1467 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count | 1468 (hbqno << 16)); 1469 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) { 1470 phba->hbqs[hbqno].buffer_count++; 1471 posted++; 1472 } else 1473 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 1474 } 1475 spin_unlock_irqrestore(&phba->hbalock, flags); 1476 return posted; 1477 err: 1478 spin_unlock_irqrestore(&phba->hbalock, flags); 1479 while (!list_empty(&hbq_buf_list)) { 1480 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf, 1481 dbuf.list); 1482 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 1483 } 1484 return 0; 1485 } 1486 1487 /** 1488 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware 1489 * @phba: Pointer to HBA context object. 1490 * @qno: HBQ number. 1491 * 1492 * This function posts more buffers to the HBQ. This function 1493 * is called with no lock held. The function returns the number of HBQ entries 1494 * successfully allocated. 1495 **/ 1496 int 1497 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno) 1498 { 1499 if (phba->sli_rev == LPFC_SLI_REV4) 1500 return 0; 1501 else 1502 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 1503 lpfc_hbq_defs[qno]->add_count); 1504 } 1505 1506 /** 1507 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ 1508 * @phba: Pointer to HBA context object. 1509 * @qno: HBQ queue number. 1510 * 1511 * This function is called from SLI initialization code path with 1512 * no lock held to post initial HBQ buffers to firmware. The 1513 * function returns the number of HBQ entries successfully allocated. 1514 **/ 1515 static int 1516 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno) 1517 { 1518 if (phba->sli_rev == LPFC_SLI_REV4) 1519 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 1520 lpfc_hbq_defs[qno]->entry_count); 1521 else 1522 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 1523 lpfc_hbq_defs[qno]->init_count); 1524 } 1525 1526 /** 1527 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list 1528 * @phba: Pointer to HBA context object. 1529 * @hbqno: HBQ number. 1530 * 1531 * This function removes the first hbq buffer on an hbq list and returns a 1532 * pointer to that buffer. If it finds no buffers on the list it returns NULL. 
1533 **/ 1534 static struct hbq_dmabuf * 1535 lpfc_sli_hbqbuf_get(struct list_head *rb_list) 1536 { 1537 struct lpfc_dmabuf *d_buf; 1538 1539 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list); 1540 if (!d_buf) 1541 return NULL; 1542 return container_of(d_buf, struct hbq_dmabuf, dbuf); 1543 } 1544 1545 /** 1546 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag 1547 * @phba: Pointer to HBA context object. 1548 * @tag: Tag of the hbq buffer. 1549 * 1550 * This function is called with hbalock held. This function searches 1551 * for the hbq buffer associated with the given tag in the hbq buffer 1552 * list. If it finds the hbq buffer, it returns the hbq_buffer other wise 1553 * it returns NULL. 1554 **/ 1555 static struct hbq_dmabuf * 1556 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag) 1557 { 1558 struct lpfc_dmabuf *d_buf; 1559 struct hbq_dmabuf *hbq_buf; 1560 uint32_t hbqno; 1561 1562 hbqno = tag >> 16; 1563 if (hbqno >= LPFC_MAX_HBQS) 1564 return NULL; 1565 1566 spin_lock_irq(&phba->hbalock); 1567 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) { 1568 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 1569 if (hbq_buf->tag == tag) { 1570 spin_unlock_irq(&phba->hbalock); 1571 return hbq_buf; 1572 } 1573 } 1574 spin_unlock_irq(&phba->hbalock); 1575 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT, 1576 "1803 Bad hbq tag. Data: x%x x%x\n", 1577 tag, phba->hbqs[tag >> 16].buffer_count); 1578 return NULL; 1579 } 1580 1581 /** 1582 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware 1583 * @phba: Pointer to HBA context object. 1584 * @hbq_buffer: Pointer to HBQ buffer. 1585 * 1586 * This function is called with hbalock. This function gives back 1587 * the hbq buffer to firmware. If the HBQ does not have space to 1588 * post the buffer, it will free the buffer. 1589 **/ 1590 void 1591 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer) 1592 { 1593 uint32_t hbqno; 1594 1595 if (hbq_buffer) { 1596 hbqno = hbq_buffer->tag >> 16; 1597 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) 1598 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 1599 } 1600 } 1601 1602 /** 1603 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox 1604 * @mbxCommand: mailbox command code. 1605 * 1606 * This function is called by the mailbox event handler function to verify 1607 * that the completed mailbox command is a legitimate mailbox command. If the 1608 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN 1609 * and the mailbox event handler will take the HBA offline. 
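 *
 * A hedged sketch of the check as the mailbox event handler applies it
 * (illustrative only; pmbox is the completed MAILBOX_t):
 *
 *	if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) == MBX_SHUTDOWN) {
 *		phba->link_state = LPFC_HBA_ERROR;
 *		// unknown mailbox completion; error attention is raised
 *	}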
1610 **/ 1611 static int 1612 lpfc_sli_chk_mbx_command(uint8_t mbxCommand) 1613 { 1614 uint8_t ret; 1615 1616 switch (mbxCommand) { 1617 case MBX_LOAD_SM: 1618 case MBX_READ_NV: 1619 case MBX_WRITE_NV: 1620 case MBX_WRITE_VPARMS: 1621 case MBX_RUN_BIU_DIAG: 1622 case MBX_INIT_LINK: 1623 case MBX_DOWN_LINK: 1624 case MBX_CONFIG_LINK: 1625 case MBX_CONFIG_RING: 1626 case MBX_RESET_RING: 1627 case MBX_READ_CONFIG: 1628 case MBX_READ_RCONFIG: 1629 case MBX_READ_SPARM: 1630 case MBX_READ_STATUS: 1631 case MBX_READ_RPI: 1632 case MBX_READ_XRI: 1633 case MBX_READ_REV: 1634 case MBX_READ_LNK_STAT: 1635 case MBX_REG_LOGIN: 1636 case MBX_UNREG_LOGIN: 1637 case MBX_READ_LA: 1638 case MBX_CLEAR_LA: 1639 case MBX_DUMP_MEMORY: 1640 case MBX_DUMP_CONTEXT: 1641 case MBX_RUN_DIAGS: 1642 case MBX_RESTART: 1643 case MBX_UPDATE_CFG: 1644 case MBX_DOWN_LOAD: 1645 case MBX_DEL_LD_ENTRY: 1646 case MBX_RUN_PROGRAM: 1647 case MBX_SET_MASK: 1648 case MBX_SET_VARIABLE: 1649 case MBX_UNREG_D_ID: 1650 case MBX_KILL_BOARD: 1651 case MBX_CONFIG_FARP: 1652 case MBX_BEACON: 1653 case MBX_LOAD_AREA: 1654 case MBX_RUN_BIU_DIAG64: 1655 case MBX_CONFIG_PORT: 1656 case MBX_READ_SPARM64: 1657 case MBX_READ_RPI64: 1658 case MBX_REG_LOGIN64: 1659 case MBX_READ_LA64: 1660 case MBX_WRITE_WWN: 1661 case MBX_SET_DEBUG: 1662 case MBX_LOAD_EXP_ROM: 1663 case MBX_ASYNCEVT_ENABLE: 1664 case MBX_REG_VPI: 1665 case MBX_UNREG_VPI: 1666 case MBX_HEARTBEAT: 1667 case MBX_PORT_CAPABILITIES: 1668 case MBX_PORT_IOV_CONTROL: 1669 case MBX_SLI4_CONFIG: 1670 case MBX_SLI4_REQ_FTRS: 1671 case MBX_REG_FCFI: 1672 case MBX_UNREG_FCFI: 1673 case MBX_REG_VFI: 1674 case MBX_UNREG_VFI: 1675 case MBX_INIT_VPI: 1676 case MBX_INIT_VFI: 1677 case MBX_RESUME_RPI: 1678 case MBX_READ_EVENT_LOG_STATUS: 1679 case MBX_READ_EVENT_LOG: 1680 case MBX_SECURITY_MGMT: 1681 case MBX_AUTH_PORT: 1682 ret = mbxCommand; 1683 break; 1684 default: 1685 ret = MBX_SHUTDOWN; 1686 break; 1687 } 1688 return ret; 1689 } 1690 1691 /** 1692 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler 1693 * @phba: Pointer to HBA context object. 1694 * @pmboxq: Pointer to mailbox command. 1695 * 1696 * This is completion handler function for mailbox commands issued from 1697 * lpfc_sli_issue_mbox_wait function. This function is called by the 1698 * mailbox event handler function with no lock held. This function 1699 * will wake up thread waiting on the wait queue pointed by context1 1700 * of the mailbox. 1701 **/ 1702 void 1703 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 1704 { 1705 wait_queue_head_t *pdone_q; 1706 unsigned long drvr_flag; 1707 1708 /* 1709 * If pdone_q is empty, the driver thread gave up waiting and 1710 * continued running. 1711 */ 1712 pmboxq->mbox_flag |= LPFC_MBX_WAKE; 1713 spin_lock_irqsave(&phba->hbalock, drvr_flag); 1714 pdone_q = (wait_queue_head_t *) pmboxq->context1; 1715 if (pdone_q) 1716 wake_up_interruptible(pdone_q); 1717 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 1718 return; 1719 } 1720 1721 1722 /** 1723 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler 1724 * @phba: Pointer to HBA context object. 1725 * @pmb: Pointer to mailbox object. 1726 * 1727 * This function is the default mailbox completion handler. It 1728 * frees the memory resources associated with the completed mailbox 1729 * command. If the completed command is a REG_LOGIN mailbox command, 1730 * this function will issue a UREG_LOGIN to re-claim the RPI. 
1731 **/ 1732 void 1733 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 1734 { 1735 struct lpfc_vport *vport = pmb->vport; 1736 struct lpfc_dmabuf *mp; 1737 struct lpfc_nodelist *ndlp; 1738 struct Scsi_Host *shost; 1739 uint16_t rpi, vpi; 1740 int rc; 1741 1742 mp = (struct lpfc_dmabuf *) (pmb->context1); 1743 1744 if (mp) { 1745 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1746 kfree(mp); 1747 } 1748 1749 if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) && 1750 (phba->sli_rev == LPFC_SLI_REV4) && 1751 (pmb->u.mb.un.varUnregLogin.rsvd1 == 0x0)) 1752 lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi); 1753 1754 /* 1755 * If a REG_LOGIN succeeded after node is destroyed or node 1756 * is in re-discovery driver need to cleanup the RPI. 1757 */ 1758 if (!(phba->pport->load_flag & FC_UNLOADING) && 1759 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 && 1760 !pmb->u.mb.mbxStatus) { 1761 rpi = pmb->u.mb.un.varWords[0]; 1762 vpi = pmb->u.mb.un.varRegLogin.vpi - phba->vpi_base; 1763 lpfc_unreg_login(phba, vpi, rpi, pmb); 1764 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 1765 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 1766 if (rc != MBX_NOT_FINISHED) 1767 return; 1768 } 1769 1770 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) && 1771 !(phba->pport->load_flag & FC_UNLOADING) && 1772 !pmb->u.mb.mbxStatus) { 1773 shost = lpfc_shost_from_vport(vport); 1774 spin_lock_irq(shost->host_lock); 1775 vport->vpi_state |= LPFC_VPI_REGISTERED; 1776 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 1777 spin_unlock_irq(shost->host_lock); 1778 } 1779 1780 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 1781 ndlp = (struct lpfc_nodelist *)pmb->context2; 1782 lpfc_nlp_put(ndlp); 1783 pmb->context2 = NULL; 1784 } 1785 1786 /* Check security permission status on INIT_LINK mailbox command */ 1787 if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) && 1788 (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION)) 1789 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 1790 "2860 SLI authentication is required " 1791 "for INIT_LINK but has not done yet\n"); 1792 1793 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG) 1794 lpfc_sli4_mbox_cmd_free(phba, pmb); 1795 else 1796 mempool_free(pmb, phba->mbox_mem_pool); 1797 } 1798 1799 /** 1800 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware 1801 * @phba: Pointer to HBA context object. 1802 * 1803 * This function is called with no lock held. This function processes all 1804 * the completed mailbox commands and gives it to upper layers. The interrupt 1805 * service routine processes mailbox completion interrupt and adds completed 1806 * mailbox commands to the mboxq_cmpl queue and signals the worker thread. 1807 * Worker thread call lpfc_sli_handle_mb_event, which will return the 1808 * completed mailbox commands in mboxq_cmpl queue to the upper layers. This 1809 * function returns the mailbox commands to the upper layer by calling the 1810 * completion handler function of each mailbox. 
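 *
 * In outline (illustrative, not a literal code path): for each completed
 * mailbox taken off the mboxq_cmpl list this function ends up running
 *
 *	if (pmb->mbox_cmpl)
 *		pmb->mbox_cmpl(phba, pmb);
 *
 * so the per-command handler (for example lpfc_sli_def_mbox_cmpl above)
 * is responsible for releasing the mailbox resources.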
1811 **/ 1812 int 1813 lpfc_sli_handle_mb_event(struct lpfc_hba *phba) 1814 { 1815 MAILBOX_t *pmbox; 1816 LPFC_MBOXQ_t *pmb; 1817 int rc; 1818 LIST_HEAD(cmplq); 1819 1820 phba->sli.slistat.mbox_event++; 1821 1822 /* Get all completed mailboxe buffers into the cmplq */ 1823 spin_lock_irq(&phba->hbalock); 1824 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq); 1825 spin_unlock_irq(&phba->hbalock); 1826 1827 /* Get a Mailbox buffer to setup mailbox commands for callback */ 1828 do { 1829 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list); 1830 if (pmb == NULL) 1831 break; 1832 1833 pmbox = &pmb->u.mb; 1834 1835 if (pmbox->mbxCommand != MBX_HEARTBEAT) { 1836 if (pmb->vport) { 1837 lpfc_debugfs_disc_trc(pmb->vport, 1838 LPFC_DISC_TRC_MBOX_VPORT, 1839 "MBOX cmpl vport: cmd:x%x mb:x%x x%x", 1840 (uint32_t)pmbox->mbxCommand, 1841 pmbox->un.varWords[0], 1842 pmbox->un.varWords[1]); 1843 } 1844 else { 1845 lpfc_debugfs_disc_trc(phba->pport, 1846 LPFC_DISC_TRC_MBOX, 1847 "MBOX cmpl: cmd:x%x mb:x%x x%x", 1848 (uint32_t)pmbox->mbxCommand, 1849 pmbox->un.varWords[0], 1850 pmbox->un.varWords[1]); 1851 } 1852 } 1853 1854 /* 1855 * It is a fatal error if unknown mbox command completion. 1856 */ 1857 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) == 1858 MBX_SHUTDOWN) { 1859 /* Unknown mailbox command compl */ 1860 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 1861 "(%d):0323 Unknown Mailbox command " 1862 "x%x (x%x) Cmpl\n", 1863 pmb->vport ? pmb->vport->vpi : 0, 1864 pmbox->mbxCommand, 1865 lpfc_sli4_mbox_opcode_get(phba, pmb)); 1866 phba->link_state = LPFC_HBA_ERROR; 1867 phba->work_hs = HS_FFER3; 1868 lpfc_handle_eratt(phba); 1869 continue; 1870 } 1871 1872 if (pmbox->mbxStatus) { 1873 phba->sli.slistat.mbox_stat_err++; 1874 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) { 1875 /* Mbox cmd cmpl error - RETRYing */ 1876 lpfc_printf_log(phba, KERN_INFO, 1877 LOG_MBOX | LOG_SLI, 1878 "(%d):0305 Mbox cmd cmpl " 1879 "error - RETRYing Data: x%x " 1880 "(x%x) x%x x%x x%x\n", 1881 pmb->vport ? pmb->vport->vpi :0, 1882 pmbox->mbxCommand, 1883 lpfc_sli4_mbox_opcode_get(phba, 1884 pmb), 1885 pmbox->mbxStatus, 1886 pmbox->un.varWords[0], 1887 pmb->vport->port_state); 1888 pmbox->mbxStatus = 0; 1889 pmbox->mbxOwner = OWN_HOST; 1890 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 1891 if (rc != MBX_NOT_FINISHED) 1892 continue; 1893 } 1894 } 1895 1896 /* Mailbox cmd <cmd> Cmpl <cmpl> */ 1897 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 1898 "(%d):0307 Mailbox cmd x%x (x%x) Cmpl x%p " 1899 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n", 1900 pmb->vport ? pmb->vport->vpi : 0, 1901 pmbox->mbxCommand, 1902 lpfc_sli4_mbox_opcode_get(phba, pmb), 1903 pmb->mbox_cmpl, 1904 *((uint32_t *) pmbox), 1905 pmbox->un.varWords[0], 1906 pmbox->un.varWords[1], 1907 pmbox->un.varWords[2], 1908 pmbox->un.varWords[3], 1909 pmbox->un.varWords[4], 1910 pmbox->un.varWords[5], 1911 pmbox->un.varWords[6], 1912 pmbox->un.varWords[7]); 1913 1914 if (pmb->mbox_cmpl) 1915 pmb->mbox_cmpl(phba,pmb); 1916 } while (1); 1917 return 0; 1918 } 1919 1920 /** 1921 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag 1922 * @phba: Pointer to HBA context object. 1923 * @pring: Pointer to driver SLI ring object. 1924 * @tag: buffer tag. 1925 * 1926 * This function is called with no lock held. When QUE_BUFTAG_BIT bit 1927 * is set in the tag the buffer is posted for a particular exchange, 1928 * the function will return the buffer without replacing the buffer. 
1929 * If the buffer is for unsolicited ELS or CT traffic, this function 1930 * returns the buffer and also posts another buffer to the firmware. 1931 **/ 1932 static struct lpfc_dmabuf * 1933 lpfc_sli_get_buff(struct lpfc_hba *phba, 1934 struct lpfc_sli_ring *pring, 1935 uint32_t tag) 1936 { 1937 struct hbq_dmabuf *hbq_entry; 1938 1939 if (tag & QUE_BUFTAG_BIT) 1940 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag); 1941 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag); 1942 if (!hbq_entry) 1943 return NULL; 1944 return &hbq_entry->dbuf; 1945 } 1946 1947 /** 1948 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence 1949 * @phba: Pointer to HBA context object. 1950 * @pring: Pointer to driver SLI ring object. 1951 * @saveq: Pointer to the iocbq struct representing the sequence starting frame. 1952 * @fch_r_ctl: the r_ctl for the first frame of the sequence. 1953 * @fch_type: the type for the first frame of the sequence. 1954 * 1955 * This function is called with no lock held. This function uses the r_ctl and 1956 * type of the received sequence to find the correct callback function to call 1957 * to process the sequence. 1958 **/ 1959 static int 1960 lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 1961 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl, 1962 uint32_t fch_type) 1963 { 1964 int i; 1965 1966 /* unSolicited Responses */ 1967 if (pring->prt[0].profile) { 1968 if (pring->prt[0].lpfc_sli_rcv_unsol_event) 1969 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring, 1970 saveq); 1971 return 1; 1972 } 1973 /* We must search, based on rctl / type 1974 for the right routine */ 1975 for (i = 0; i < pring->num_mask; i++) { 1976 if ((pring->prt[i].rctl == fch_r_ctl) && 1977 (pring->prt[i].type == fch_type)) { 1978 if (pring->prt[i].lpfc_sli_rcv_unsol_event) 1979 (pring->prt[i].lpfc_sli_rcv_unsol_event) 1980 (phba, pring, saveq); 1981 return 1; 1982 } 1983 } 1984 return 0; 1985 } 1986 1987 /** 1988 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler 1989 * @phba: Pointer to HBA context object. 1990 * @pring: Pointer to driver SLI ring object. 1991 * @saveq: Pointer to the unsolicited iocb. 1992 * 1993 * This function is called with no lock held by the ring event handler 1994 * when there is an unsolicited iocb posted to the response ring by the 1995 * firmware. This function gets the buffer associated with the iocbs 1996 * and calls the event handler for the ring. This function handles both 1997 * qring buffers and hbq buffers. 1998 * When the function returns 1 the caller can free the iocb object otherwise 1999 * upper layer functions will free the iocb objects. 
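 *
 * Callers use the return value to decide who frees @saveq. A sketch,
 * mirroring lpfc_sli_sp_handle_rspiocb() later in this file:
 *
 *	rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
 *	if (!rc)
 *		free_saveq = 0;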
2000 **/ 2001 static int 2002 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2003 struct lpfc_iocbq *saveq) 2004 { 2005 IOCB_t * irsp; 2006 WORD5 * w5p; 2007 uint32_t Rctl, Type; 2008 uint32_t match; 2009 struct lpfc_iocbq *iocbq; 2010 struct lpfc_dmabuf *dmzbuf; 2011 2012 match = 0; 2013 irsp = &(saveq->iocb); 2014 2015 if (irsp->ulpCommand == CMD_ASYNC_STATUS) { 2016 if (pring->lpfc_sli_rcv_async_status) 2017 pring->lpfc_sli_rcv_async_status(phba, pring, saveq); 2018 else 2019 lpfc_printf_log(phba, 2020 KERN_WARNING, 2021 LOG_SLI, 2022 "0316 Ring %d handler: unexpected " 2023 "ASYNC_STATUS iocb received evt_code " 2024 "0x%x\n", 2025 pring->ringno, 2026 irsp->un.asyncstat.evt_code); 2027 return 1; 2028 } 2029 2030 if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) && 2031 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) { 2032 if (irsp->ulpBdeCount > 0) { 2033 dmzbuf = lpfc_sli_get_buff(phba, pring, 2034 irsp->un.ulpWord[3]); 2035 lpfc_in_buf_free(phba, dmzbuf); 2036 } 2037 2038 if (irsp->ulpBdeCount > 1) { 2039 dmzbuf = lpfc_sli_get_buff(phba, pring, 2040 irsp->unsli3.sli3Words[3]); 2041 lpfc_in_buf_free(phba, dmzbuf); 2042 } 2043 2044 if (irsp->ulpBdeCount > 2) { 2045 dmzbuf = lpfc_sli_get_buff(phba, pring, 2046 irsp->unsli3.sli3Words[7]); 2047 lpfc_in_buf_free(phba, dmzbuf); 2048 } 2049 2050 return 1; 2051 } 2052 2053 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 2054 if (irsp->ulpBdeCount != 0) { 2055 saveq->context2 = lpfc_sli_get_buff(phba, pring, 2056 irsp->un.ulpWord[3]); 2057 if (!saveq->context2) 2058 lpfc_printf_log(phba, 2059 KERN_ERR, 2060 LOG_SLI, 2061 "0341 Ring %d Cannot find buffer for " 2062 "an unsolicited iocb. tag 0x%x\n", 2063 pring->ringno, 2064 irsp->un.ulpWord[3]); 2065 } 2066 if (irsp->ulpBdeCount == 2) { 2067 saveq->context3 = lpfc_sli_get_buff(phba, pring, 2068 irsp->unsli3.sli3Words[7]); 2069 if (!saveq->context3) 2070 lpfc_printf_log(phba, 2071 KERN_ERR, 2072 LOG_SLI, 2073 "0342 Ring %d Cannot find buffer for an" 2074 " unsolicited iocb. tag 0x%x\n", 2075 pring->ringno, 2076 irsp->unsli3.sli3Words[7]); 2077 } 2078 list_for_each_entry(iocbq, &saveq->list, list) { 2079 irsp = &(iocbq->iocb); 2080 if (irsp->ulpBdeCount != 0) { 2081 iocbq->context2 = lpfc_sli_get_buff(phba, pring, 2082 irsp->un.ulpWord[3]); 2083 if (!iocbq->context2) 2084 lpfc_printf_log(phba, 2085 KERN_ERR, 2086 LOG_SLI, 2087 "0343 Ring %d Cannot find " 2088 "buffer for an unsolicited iocb" 2089 ". tag 0x%x\n", pring->ringno, 2090 irsp->un.ulpWord[3]); 2091 } 2092 if (irsp->ulpBdeCount == 2) { 2093 iocbq->context3 = lpfc_sli_get_buff(phba, pring, 2094 irsp->unsli3.sli3Words[7]); 2095 if (!iocbq->context3) 2096 lpfc_printf_log(phba, 2097 KERN_ERR, 2098 LOG_SLI, 2099 "0344 Ring %d Cannot find " 2100 "buffer for an unsolicited " 2101 "iocb. 
tag 0x%x\n", 2102 pring->ringno, 2103 irsp->unsli3.sli3Words[7]); 2104 } 2105 } 2106 } 2107 if (irsp->ulpBdeCount != 0 && 2108 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX || 2109 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) { 2110 int found = 0; 2111 2112 /* search continue save q for same XRI */ 2113 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) { 2114 if (iocbq->iocb.ulpContext == saveq->iocb.ulpContext) { 2115 list_add_tail(&saveq->list, &iocbq->list); 2116 found = 1; 2117 break; 2118 } 2119 } 2120 if (!found) 2121 list_add_tail(&saveq->clist, 2122 &pring->iocb_continue_saveq); 2123 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) { 2124 list_del_init(&iocbq->clist); 2125 saveq = iocbq; 2126 irsp = &(saveq->iocb); 2127 } else 2128 return 0; 2129 } 2130 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) || 2131 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) || 2132 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) { 2133 Rctl = FC_RCTL_ELS_REQ; 2134 Type = FC_TYPE_ELS; 2135 } else { 2136 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]); 2137 Rctl = w5p->hcsw.Rctl; 2138 Type = w5p->hcsw.Type; 2139 2140 /* Firmware Workaround */ 2141 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) && 2142 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX || 2143 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { 2144 Rctl = FC_RCTL_ELS_REQ; 2145 Type = FC_TYPE_ELS; 2146 w5p->hcsw.Rctl = Rctl; 2147 w5p->hcsw.Type = Type; 2148 } 2149 } 2150 2151 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type)) 2152 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 2153 "0313 Ring %d handler: unexpected Rctl x%x " 2154 "Type x%x received\n", 2155 pring->ringno, Rctl, Type); 2156 2157 return 1; 2158 } 2159 2160 /** 2161 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb 2162 * @phba: Pointer to HBA context object. 2163 * @pring: Pointer to driver SLI ring object. 2164 * @prspiocb: Pointer to response iocb object. 2165 * 2166 * This function looks up the iocb_lookup table to get the command iocb 2167 * corresponding to the given response iocb using the iotag of the 2168 * response iocb. This function is called with the hbalock held. 2169 * This function returns the command iocb object if it finds the command 2170 * iocb else returns NULL. 2171 **/ 2172 static struct lpfc_iocbq * 2173 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba, 2174 struct lpfc_sli_ring *pring, 2175 struct lpfc_iocbq *prspiocb) 2176 { 2177 struct lpfc_iocbq *cmd_iocb = NULL; 2178 uint16_t iotag; 2179 2180 iotag = prspiocb->iocb.ulpIoTag; 2181 2182 if (iotag != 0 && iotag <= phba->sli.last_iotag) { 2183 cmd_iocb = phba->sli.iocbq_lookup[iotag]; 2184 list_del_init(&cmd_iocb->list); 2185 if (cmd_iocb->iocb_flag & LPFC_IO_ON_Q) { 2186 pring->txcmplq_cnt--; 2187 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_Q; 2188 } 2189 return cmd_iocb; 2190 } 2191 2192 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2193 "0317 iotag x%x is out off " 2194 "range: max iotag x%x wd0 x%x\n", 2195 iotag, phba->sli.last_iotag, 2196 *(((uint32_t *) &prspiocb->iocb) + 7)); 2197 return NULL; 2198 } 2199 2200 /** 2201 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag 2202 * @phba: Pointer to HBA context object. 2203 * @pring: Pointer to driver SLI ring object. 2204 * @iotag: IOCB tag. 2205 * 2206 * This function looks up the iocb_lookup table to get the command iocb 2207 * corresponding to the given iotag. This function is called with the 2208 * hbalock held. 2209 * This function returns the command iocb object if it finds the command 2210 * iocb else returns NULL. 
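 *
 * Illustrative call pattern (an assumption based on the locking rule
 * stated above, not copied from a specific caller; "iflag" is a local
 * of the hypothetical caller):
 *
 *	spin_lock_irqsave(&phba->hbalock, iflag);
 *	cmd_iocb = lpfc_sli_iocbq_lookup_by_tag(phba, pring, iotag);
 *	spin_unlock_irqrestore(&phba->hbalock, iflag);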
2211 **/ 2212 static struct lpfc_iocbq * 2213 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba, 2214 struct lpfc_sli_ring *pring, uint16_t iotag) 2215 { 2216 struct lpfc_iocbq *cmd_iocb; 2217 2218 if (iotag != 0 && iotag <= phba->sli.last_iotag) { 2219 cmd_iocb = phba->sli.iocbq_lookup[iotag]; 2220 list_del_init(&cmd_iocb->list); 2221 if (cmd_iocb->iocb_flag & LPFC_IO_ON_Q) { 2222 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_Q; 2223 pring->txcmplq_cnt--; 2224 } 2225 return cmd_iocb; 2226 } 2227 2228 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2229 "0372 iotag x%x is out off range: max iotag (x%x)\n", 2230 iotag, phba->sli.last_iotag); 2231 return NULL; 2232 } 2233 2234 /** 2235 * lpfc_sli_process_sol_iocb - process solicited iocb completion 2236 * @phba: Pointer to HBA context object. 2237 * @pring: Pointer to driver SLI ring object. 2238 * @saveq: Pointer to the response iocb to be processed. 2239 * 2240 * This function is called by the ring event handler for non-fcp 2241 * rings when there is a new response iocb in the response ring. 2242 * The caller is not required to hold any locks. This function 2243 * gets the command iocb associated with the response iocb and 2244 * calls the completion handler for the command iocb. If there 2245 * is no completion handler, the function will free the resources 2246 * associated with command iocb. If the response iocb is for 2247 * an already aborted command iocb, the status of the completion 2248 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED. 2249 * This function always returns 1. 2250 **/ 2251 static int 2252 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2253 struct lpfc_iocbq *saveq) 2254 { 2255 struct lpfc_iocbq *cmdiocbp; 2256 int rc = 1; 2257 unsigned long iflag; 2258 2259 /* Based on the iotag field, get the cmd IOCB from the txcmplq */ 2260 spin_lock_irqsave(&phba->hbalock, iflag); 2261 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq); 2262 spin_unlock_irqrestore(&phba->hbalock, iflag); 2263 2264 if (cmdiocbp) { 2265 if (cmdiocbp->iocb_cmpl) { 2266 /* 2267 * If an ELS command failed send an event to mgmt 2268 * application. 2269 */ 2270 if (saveq->iocb.ulpStatus && 2271 (pring->ringno == LPFC_ELS_RING) && 2272 (cmdiocbp->iocb.ulpCommand == 2273 CMD_ELS_REQUEST64_CR)) 2274 lpfc_send_els_failure_event(phba, 2275 cmdiocbp, saveq); 2276 2277 /* 2278 * Post all ELS completions to the worker thread. 2279 * All other are passed to the completion callback. 2280 */ 2281 if (pring->ringno == LPFC_ELS_RING) { 2282 if ((phba->sli_rev < LPFC_SLI_REV4) && 2283 (cmdiocbp->iocb_flag & 2284 LPFC_DRIVER_ABORTED)) { 2285 spin_lock_irqsave(&phba->hbalock, 2286 iflag); 2287 cmdiocbp->iocb_flag &= 2288 ~LPFC_DRIVER_ABORTED; 2289 spin_unlock_irqrestore(&phba->hbalock, 2290 iflag); 2291 saveq->iocb.ulpStatus = 2292 IOSTAT_LOCAL_REJECT; 2293 saveq->iocb.un.ulpWord[4] = 2294 IOERR_SLI_ABORTED; 2295 2296 /* Firmware could still be in progress 2297 * of DMAing payload, so don't free data 2298 * buffer till after a hbeat. 2299 */ 2300 spin_lock_irqsave(&phba->hbalock, 2301 iflag); 2302 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE; 2303 spin_unlock_irqrestore(&phba->hbalock, 2304 iflag); 2305 } 2306 if (phba->sli_rev == LPFC_SLI_REV4) { 2307 if (saveq->iocb_flag & 2308 LPFC_EXCHANGE_BUSY) { 2309 /* Set cmdiocb flag for the 2310 * exchange busy so sgl (xri) 2311 * will not be released until 2312 * the abort xri is received 2313 * from hba. 
2314 */ 2315 spin_lock_irqsave( 2316 &phba->hbalock, iflag); 2317 cmdiocbp->iocb_flag |= 2318 LPFC_EXCHANGE_BUSY; 2319 spin_unlock_irqrestore( 2320 &phba->hbalock, iflag); 2321 } 2322 if (cmdiocbp->iocb_flag & 2323 LPFC_DRIVER_ABORTED) { 2324 /* 2325 * Clear LPFC_DRIVER_ABORTED 2326 * bit in case it was driver 2327 * initiated abort. 2328 */ 2329 spin_lock_irqsave( 2330 &phba->hbalock, iflag); 2331 cmdiocbp->iocb_flag &= 2332 ~LPFC_DRIVER_ABORTED; 2333 spin_unlock_irqrestore( 2334 &phba->hbalock, iflag); 2335 cmdiocbp->iocb.ulpStatus = 2336 IOSTAT_LOCAL_REJECT; 2337 cmdiocbp->iocb.un.ulpWord[4] = 2338 IOERR_ABORT_REQUESTED; 2339 /* 2340 * For SLI4, irsiocb contains 2341 * NO_XRI in sli_xritag, it 2342 * shall not affect releasing 2343 * sgl (xri) process. 2344 */ 2345 saveq->iocb.ulpStatus = 2346 IOSTAT_LOCAL_REJECT; 2347 saveq->iocb.un.ulpWord[4] = 2348 IOERR_SLI_ABORTED; 2349 spin_lock_irqsave( 2350 &phba->hbalock, iflag); 2351 saveq->iocb_flag |= 2352 LPFC_DELAY_MEM_FREE; 2353 spin_unlock_irqrestore( 2354 &phba->hbalock, iflag); 2355 } 2356 } 2357 } 2358 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq); 2359 } else 2360 lpfc_sli_release_iocbq(phba, cmdiocbp); 2361 } else { 2362 /* 2363 * Unknown initiating command based on the response iotag. 2364 * This could be the case on the ELS ring because of 2365 * lpfc_els_abort(). 2366 */ 2367 if (pring->ringno != LPFC_ELS_RING) { 2368 /* 2369 * Ring <ringno> handler: unexpected completion IoTag 2370 * <IoTag> 2371 */ 2372 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 2373 "0322 Ring %d handler: " 2374 "unexpected completion IoTag x%x " 2375 "Data: x%x x%x x%x x%x\n", 2376 pring->ringno, 2377 saveq->iocb.ulpIoTag, 2378 saveq->iocb.ulpStatus, 2379 saveq->iocb.un.ulpWord[4], 2380 saveq->iocb.ulpCommand, 2381 saveq->iocb.ulpContext); 2382 } 2383 } 2384 2385 return rc; 2386 } 2387 2388 /** 2389 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler 2390 * @phba: Pointer to HBA context object. 2391 * @pring: Pointer to driver SLI ring object. 2392 * 2393 * This function is called from the iocb ring event handlers when 2394 * put pointer is ahead of the get pointer for a ring. This function signal 2395 * an error attention condition to the worker thread and the worker 2396 * thread will transition the HBA to offline state. 2397 **/ 2398 static void 2399 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 2400 { 2401 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; 2402 /* 2403 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than 2404 * rsp ring <portRspMax> 2405 */ 2406 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2407 "0312 Ring %d handler: portRspPut %d " 2408 "is bigger than rsp ring %d\n", 2409 pring->ringno, le32_to_cpu(pgp->rspPutInx), 2410 pring->numRiocb); 2411 2412 phba->link_state = LPFC_HBA_ERROR; 2413 2414 /* 2415 * All error attention handlers are posted to 2416 * worker thread 2417 */ 2418 phba->work_ha |= HA_ERATT; 2419 phba->work_hs = HS_FFER3; 2420 2421 lpfc_worker_wake_up(phba); 2422 2423 return; 2424 } 2425 2426 /** 2427 * lpfc_poll_eratt - Error attention polling timer timeout handler 2428 * @ptr: Pointer to address of HBA context object. 2429 * 2430 * This function is invoked by the Error Attention polling timer when the 2431 * timer times out. It will check the SLI Error Attention register for 2432 * possible attention events. If so, it will post an Error Attention event 2433 * and wake up worker thread to process it. 
Otherwise, it will set up the 2434 * Error Attention polling timer for the next poll. 2435 **/ 2436 void lpfc_poll_eratt(unsigned long ptr) 2437 { 2438 struct lpfc_hba *phba; 2439 uint32_t eratt = 0; 2440 2441 phba = (struct lpfc_hba *)ptr; 2442 2443 /* Check chip HA register for error event */ 2444 eratt = lpfc_sli_check_eratt(phba); 2445 2446 if (eratt) 2447 /* Tell the worker thread there is work to do */ 2448 lpfc_worker_wake_up(phba); 2449 else 2450 /* Restart the timer for next eratt poll */ 2451 mod_timer(&phba->eratt_poll, jiffies + 2452 HZ * LPFC_ERATT_POLL_INTERVAL); 2453 return; 2454 } 2455 2456 2457 /** 2458 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring 2459 * @phba: Pointer to HBA context object. 2460 * @pring: Pointer to driver SLI ring object. 2461 * @mask: Host attention register mask for this ring. 2462 * 2463 * This function is called from the interrupt context when there is a ring 2464 * event for the fcp ring. The caller does not hold any lock. 2465 * The function processes each response iocb in the response ring until it 2466 * finds an iocb with LE bit set and chains all the iocbs upto the iocb with 2467 * LE bit set. The function will call the completion handler of the command iocb 2468 * if the response iocb indicates a completion for a command iocb or it is 2469 * an abort completion. The function will call lpfc_sli_process_unsol_iocb 2470 * function if this is an unsolicited iocb. 2471 * This routine presumes LPFC_FCP_RING handling and doesn't bother 2472 * to check it explicitly. 2473 */ 2474 int 2475 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, 2476 struct lpfc_sli_ring *pring, uint32_t mask) 2477 { 2478 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; 2479 IOCB_t *irsp = NULL; 2480 IOCB_t *entry = NULL; 2481 struct lpfc_iocbq *cmdiocbq = NULL; 2482 struct lpfc_iocbq rspiocbq; 2483 uint32_t status; 2484 uint32_t portRspPut, portRspMax; 2485 int rc = 1; 2486 lpfc_iocb_type type; 2487 unsigned long iflag; 2488 uint32_t rsp_cmpl = 0; 2489 2490 spin_lock_irqsave(&phba->hbalock, iflag); 2491 pring->stats.iocb_event++; 2492 2493 /* 2494 * The next available response entry should never exceed the maximum 2495 * entries. If it does, treat it as an adapter hardware error. 2496 */ 2497 portRspMax = pring->numRiocb; 2498 portRspPut = le32_to_cpu(pgp->rspPutInx); 2499 if (unlikely(portRspPut >= portRspMax)) { 2500 lpfc_sli_rsp_pointers_error(phba, pring); 2501 spin_unlock_irqrestore(&phba->hbalock, iflag); 2502 return 1; 2503 } 2504 if (phba->fcp_ring_in_use) { 2505 spin_unlock_irqrestore(&phba->hbalock, iflag); 2506 return 1; 2507 } else 2508 phba->fcp_ring_in_use = 1; 2509 2510 rmb(); 2511 while (pring->rspidx != portRspPut) { 2512 /* 2513 * Fetch an entry off the ring and copy it into a local data 2514 * structure. The copy involves a byte-swap since the 2515 * network byte order and pci byte orders are different. 2516 */ 2517 entry = lpfc_resp_iocb(phba, pring); 2518 phba->last_completion_time = jiffies; 2519 2520 if (++pring->rspidx >= portRspMax) 2521 pring->rspidx = 0; 2522 2523 lpfc_sli_pcimem_bcopy((uint32_t *) entry, 2524 (uint32_t *) &rspiocbq.iocb, 2525 phba->iocb_rsp_size); 2526 INIT_LIST_HEAD(&(rspiocbq.list)); 2527 irsp = &rspiocbq.iocb; 2528 2529 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK); 2530 pring->stats.iocb_rsp++; 2531 rsp_cmpl++; 2532 2533 if (unlikely(irsp->ulpStatus)) { 2534 /* 2535 * If resource errors reported from HBA, reduce 2536 * queuedepths of the SCSI device. 
2537 */ 2538 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 2539 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) { 2540 spin_unlock_irqrestore(&phba->hbalock, iflag); 2541 phba->lpfc_rampdown_queue_depth(phba); 2542 spin_lock_irqsave(&phba->hbalock, iflag); 2543 } 2544 2545 /* Rsp ring <ringno> error: IOCB */ 2546 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 2547 "0336 Rsp Ring %d error: IOCB Data: " 2548 "x%x x%x x%x x%x x%x x%x x%x x%x\n", 2549 pring->ringno, 2550 irsp->un.ulpWord[0], 2551 irsp->un.ulpWord[1], 2552 irsp->un.ulpWord[2], 2553 irsp->un.ulpWord[3], 2554 irsp->un.ulpWord[4], 2555 irsp->un.ulpWord[5], 2556 *(uint32_t *)&irsp->un1, 2557 *((uint32_t *)&irsp->un1 + 1)); 2558 } 2559 2560 switch (type) { 2561 case LPFC_ABORT_IOCB: 2562 case LPFC_SOL_IOCB: 2563 /* 2564 * Idle exchange closed via ABTS from port. No iocb 2565 * resources need to be recovered. 2566 */ 2567 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) { 2568 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 2569 "0333 IOCB cmd 0x%x" 2570 " processed. Skipping" 2571 " completion\n", 2572 irsp->ulpCommand); 2573 break; 2574 } 2575 2576 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, 2577 &rspiocbq); 2578 if (unlikely(!cmdiocbq)) 2579 break; 2580 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) 2581 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED; 2582 if (cmdiocbq->iocb_cmpl) { 2583 spin_unlock_irqrestore(&phba->hbalock, iflag); 2584 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, 2585 &rspiocbq); 2586 spin_lock_irqsave(&phba->hbalock, iflag); 2587 } 2588 break; 2589 case LPFC_UNSOL_IOCB: 2590 spin_unlock_irqrestore(&phba->hbalock, iflag); 2591 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq); 2592 spin_lock_irqsave(&phba->hbalock, iflag); 2593 break; 2594 default: 2595 if (irsp->ulpCommand == CMD_ADAPTER_MSG) { 2596 char adaptermsg[LPFC_MAX_ADPTMSG]; 2597 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); 2598 memcpy(&adaptermsg[0], (uint8_t *) irsp, 2599 MAX_MSG_DATA); 2600 dev_warn(&((phba->pcidev)->dev), 2601 "lpfc%d: %s\n", 2602 phba->brd_no, adaptermsg); 2603 } else { 2604 /* Unknown IOCB command */ 2605 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2606 "0334 Unknown IOCB command " 2607 "Data: x%x, x%x x%x x%x x%x\n", 2608 type, irsp->ulpCommand, 2609 irsp->ulpStatus, 2610 irsp->ulpIoTag, 2611 irsp->ulpContext); 2612 } 2613 break; 2614 } 2615 2616 /* 2617 * The response IOCB has been processed. Update the ring 2618 * pointer in SLIM. If the port response put pointer has not 2619 * been updated, sync the pgp->rspPutInx and fetch the new port 2620 * response put pointer. 
		 */
		writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);

		if (pring->rspidx == portRspPut)
			portRspPut = le32_to_cpu(pgp->rspPutInx);
	}

	if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
		pring->stats.iocb_rsp_full++;
		status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
		writel(status, phba->CAregaddr);
		readl(phba->CAregaddr);
	}
	if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
		pring->stats.iocb_cmd_empty++;

		/* Force update of the local copy of cmdGetInx */
		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
		lpfc_sli_resume_iocb(phba, pring);

		if ((pring->lpfc_sli_cmd_available))
			(pring->lpfc_sli_cmd_available) (phba, pring);

	}

	phba->fcp_ring_in_use = 0;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return rc;
}

/**
 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @rspiocbp: Pointer to driver response IOCB object.
 *
 * This function is called from the worker thread when there is a slow-path
 * response IOCB to process. This function chains all the response iocbs until
 * seeing the iocb with the LE bit set. The function will call
 * lpfc_sli_process_sol_iocb function if the response iocb indicates a
 * completion of a command iocb. The function will call the
 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
 * The function frees the resources or calls the completion handler if this
 * iocb is an abort completion. The function returns NULL when the response
 * iocb has the LE bit set and all the chained iocbs are processed, otherwise
 * this function shall chain the iocb on to the iocb_continueq and return the
 * response iocb passed in.
 **/
static struct lpfc_iocbq *
lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			   struct lpfc_iocbq *rspiocbp)
{
	struct lpfc_iocbq *saveq;
	struct lpfc_iocbq *cmdiocbp;
	struct lpfc_iocbq *next_iocb;
	IOCB_t *irsp = NULL;
	uint32_t free_saveq;
	uint8_t iocb_cmd_type;
	lpfc_iocb_type type;
	unsigned long iflag;
	int rc;

	spin_lock_irqsave(&phba->hbalock, iflag);
	/* First add the response iocb to the continueq list */
	list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
	pring->iocb_continueq_cnt++;

	/* Now, determine whether the list is completed for processing */
	irsp = &rspiocbp->iocb;
	if (irsp->ulpLe) {
		/*
		 * By default, the driver expects to free all resources
		 * associated with this iocb completion.
		 */
		free_saveq = 1;
		saveq = list_get_first(&pring->iocb_continueq,
				       struct lpfc_iocbq, list);
		irsp = &(saveq->iocb);
		list_del_init(&pring->iocb_continueq);
		pring->iocb_continueq_cnt = 0;

		pring->stats.iocb_rsp++;

		/*
		 * If resource errors reported from HBA, reduce
		 * queuedepths of the SCSI device.
2708 */ 2709 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 2710 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) { 2711 spin_unlock_irqrestore(&phba->hbalock, iflag); 2712 phba->lpfc_rampdown_queue_depth(phba); 2713 spin_lock_irqsave(&phba->hbalock, iflag); 2714 } 2715 2716 if (irsp->ulpStatus) { 2717 /* Rsp ring <ringno> error: IOCB */ 2718 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 2719 "0328 Rsp Ring %d error: " 2720 "IOCB Data: " 2721 "x%x x%x x%x x%x " 2722 "x%x x%x x%x x%x " 2723 "x%x x%x x%x x%x " 2724 "x%x x%x x%x x%x\n", 2725 pring->ringno, 2726 irsp->un.ulpWord[0], 2727 irsp->un.ulpWord[1], 2728 irsp->un.ulpWord[2], 2729 irsp->un.ulpWord[3], 2730 irsp->un.ulpWord[4], 2731 irsp->un.ulpWord[5], 2732 *(((uint32_t *) irsp) + 6), 2733 *(((uint32_t *) irsp) + 7), 2734 *(((uint32_t *) irsp) + 8), 2735 *(((uint32_t *) irsp) + 9), 2736 *(((uint32_t *) irsp) + 10), 2737 *(((uint32_t *) irsp) + 11), 2738 *(((uint32_t *) irsp) + 12), 2739 *(((uint32_t *) irsp) + 13), 2740 *(((uint32_t *) irsp) + 14), 2741 *(((uint32_t *) irsp) + 15)); 2742 } 2743 2744 /* 2745 * Fetch the IOCB command type and call the correct completion 2746 * routine. Solicited and Unsolicited IOCBs on the ELS ring 2747 * get freed back to the lpfc_iocb_list by the discovery 2748 * kernel thread. 2749 */ 2750 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK; 2751 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type); 2752 switch (type) { 2753 case LPFC_SOL_IOCB: 2754 spin_unlock_irqrestore(&phba->hbalock, iflag); 2755 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq); 2756 spin_lock_irqsave(&phba->hbalock, iflag); 2757 break; 2758 2759 case LPFC_UNSOL_IOCB: 2760 spin_unlock_irqrestore(&phba->hbalock, iflag); 2761 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq); 2762 spin_lock_irqsave(&phba->hbalock, iflag); 2763 if (!rc) 2764 free_saveq = 0; 2765 break; 2766 2767 case LPFC_ABORT_IOCB: 2768 cmdiocbp = NULL; 2769 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX) 2770 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, 2771 saveq); 2772 if (cmdiocbp) { 2773 /* Call the specified completion routine */ 2774 if (cmdiocbp->iocb_cmpl) { 2775 spin_unlock_irqrestore(&phba->hbalock, 2776 iflag); 2777 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp, 2778 saveq); 2779 spin_lock_irqsave(&phba->hbalock, 2780 iflag); 2781 } else 2782 __lpfc_sli_release_iocbq(phba, 2783 cmdiocbp); 2784 } 2785 break; 2786 2787 case LPFC_UNKNOWN_IOCB: 2788 if (irsp->ulpCommand == CMD_ADAPTER_MSG) { 2789 char adaptermsg[LPFC_MAX_ADPTMSG]; 2790 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); 2791 memcpy(&adaptermsg[0], (uint8_t *)irsp, 2792 MAX_MSG_DATA); 2793 dev_warn(&((phba->pcidev)->dev), 2794 "lpfc%d: %s\n", 2795 phba->brd_no, adaptermsg); 2796 } else { 2797 /* Unknown IOCB command */ 2798 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2799 "0335 Unknown IOCB " 2800 "command Data: x%x " 2801 "x%x x%x x%x\n", 2802 irsp->ulpCommand, 2803 irsp->ulpStatus, 2804 irsp->ulpIoTag, 2805 irsp->ulpContext); 2806 } 2807 break; 2808 } 2809 2810 if (free_saveq) { 2811 list_for_each_entry_safe(rspiocbp, next_iocb, 2812 &saveq->list, list) { 2813 list_del(&rspiocbp->list); 2814 __lpfc_sli_release_iocbq(phba, rspiocbp); 2815 } 2816 __lpfc_sli_release_iocbq(phba, saveq); 2817 } 2818 rspiocbp = NULL; 2819 } 2820 spin_unlock_irqrestore(&phba->hbalock, iflag); 2821 return rspiocbp; 2822 } 2823 2824 /** 2825 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs 2826 * @phba: Pointer to HBA context object. 2827 * @pring: Pointer to driver SLI ring object. 
2828 * @mask: Host attention register mask for this ring. 2829 * 2830 * This routine wraps the actual slow_ring event process routine from the 2831 * API jump table function pointer from the lpfc_hba struct. 2832 **/ 2833 void 2834 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba, 2835 struct lpfc_sli_ring *pring, uint32_t mask) 2836 { 2837 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask); 2838 } 2839 2840 /** 2841 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings 2842 * @phba: Pointer to HBA context object. 2843 * @pring: Pointer to driver SLI ring object. 2844 * @mask: Host attention register mask for this ring. 2845 * 2846 * This function is called from the worker thread when there is a ring event 2847 * for non-fcp rings. The caller does not hold any lock. The function will 2848 * remove each response iocb in the response ring and calls the handle 2849 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it. 2850 **/ 2851 static void 2852 lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba, 2853 struct lpfc_sli_ring *pring, uint32_t mask) 2854 { 2855 struct lpfc_pgp *pgp; 2856 IOCB_t *entry; 2857 IOCB_t *irsp = NULL; 2858 struct lpfc_iocbq *rspiocbp = NULL; 2859 uint32_t portRspPut, portRspMax; 2860 unsigned long iflag; 2861 uint32_t status; 2862 2863 pgp = &phba->port_gp[pring->ringno]; 2864 spin_lock_irqsave(&phba->hbalock, iflag); 2865 pring->stats.iocb_event++; 2866 2867 /* 2868 * The next available response entry should never exceed the maximum 2869 * entries. If it does, treat it as an adapter hardware error. 2870 */ 2871 portRspMax = pring->numRiocb; 2872 portRspPut = le32_to_cpu(pgp->rspPutInx); 2873 if (portRspPut >= portRspMax) { 2874 /* 2875 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than 2876 * rsp ring <portRspMax> 2877 */ 2878 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2879 "0303 Ring %d handler: portRspPut %d " 2880 "is bigger than rsp ring %d\n", 2881 pring->ringno, portRspPut, portRspMax); 2882 2883 phba->link_state = LPFC_HBA_ERROR; 2884 spin_unlock_irqrestore(&phba->hbalock, iflag); 2885 2886 phba->work_hs = HS_FFER3; 2887 lpfc_handle_eratt(phba); 2888 2889 return; 2890 } 2891 2892 rmb(); 2893 while (pring->rspidx != portRspPut) { 2894 /* 2895 * Build a completion list and call the appropriate handler. 2896 * The process is to get the next available response iocb, get 2897 * a free iocb from the list, copy the response data into the 2898 * free iocb, insert to the continuation list, and update the 2899 * next response index to slim. This process makes response 2900 * iocb's in the ring available to DMA as fast as possible but 2901 * pays a penalty for a copy operation. Since the iocb is 2902 * only 32 bytes, this penalty is considered small relative to 2903 * the PCI reads for register values and a slim write. When 2904 * the ulpLe field is set, the entire Command has been 2905 * received. 2906 */ 2907 entry = lpfc_resp_iocb(phba, pring); 2908 2909 phba->last_completion_time = jiffies; 2910 rspiocbp = __lpfc_sli_get_iocbq(phba); 2911 if (rspiocbp == NULL) { 2912 printk(KERN_ERR "%s: out of buffers! 
Failing " 2913 "completion.\n", __func__); 2914 break; 2915 } 2916 2917 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb, 2918 phba->iocb_rsp_size); 2919 irsp = &rspiocbp->iocb; 2920 2921 if (++pring->rspidx >= portRspMax) 2922 pring->rspidx = 0; 2923 2924 if (pring->ringno == LPFC_ELS_RING) { 2925 lpfc_debugfs_slow_ring_trc(phba, 2926 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x", 2927 *(((uint32_t *) irsp) + 4), 2928 *(((uint32_t *) irsp) + 6), 2929 *(((uint32_t *) irsp) + 7)); 2930 } 2931 2932 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx); 2933 2934 spin_unlock_irqrestore(&phba->hbalock, iflag); 2935 /* Handle the response IOCB */ 2936 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp); 2937 spin_lock_irqsave(&phba->hbalock, iflag); 2938 2939 /* 2940 * If the port response put pointer has not been updated, sync 2941 * the pgp->rspPutInx in the MAILBOX_tand fetch the new port 2942 * response put pointer. 2943 */ 2944 if (pring->rspidx == portRspPut) { 2945 portRspPut = le32_to_cpu(pgp->rspPutInx); 2946 } 2947 } /* while (pring->rspidx != portRspPut) */ 2948 2949 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) { 2950 /* At least one response entry has been freed */ 2951 pring->stats.iocb_rsp_full++; 2952 /* SET RxRE_RSP in Chip Att register */ 2953 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4)); 2954 writel(status, phba->CAregaddr); 2955 readl(phba->CAregaddr); /* flush */ 2956 } 2957 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) { 2958 pring->flag &= ~LPFC_CALL_RING_AVAILABLE; 2959 pring->stats.iocb_cmd_empty++; 2960 2961 /* Force update of the local copy of cmdGetInx */ 2962 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx); 2963 lpfc_sli_resume_iocb(phba, pring); 2964 2965 if ((pring->lpfc_sli_cmd_available)) 2966 (pring->lpfc_sli_cmd_available) (phba, pring); 2967 2968 } 2969 2970 spin_unlock_irqrestore(&phba->hbalock, iflag); 2971 return; 2972 } 2973 2974 /** 2975 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events 2976 * @phba: Pointer to HBA context object. 2977 * @pring: Pointer to driver SLI ring object. 2978 * @mask: Host attention register mask for this ring. 2979 * 2980 * This function is called from the worker thread when there is a pending 2981 * ELS response iocb on the driver internal slow-path response iocb worker 2982 * queue. The caller does not hold any lock. The function will remove each 2983 * response iocb from the response worker queue and calls the handle 2984 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it. 
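 *
 * Note: as with the SLI3 variant above, this routine is not called
 * directly; the wrapper lpfc_sli_handle_slow_ring_event() dispatches to
 * it through the API jump table pointer, e.g.
 *
 *	phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);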
2985 **/ 2986 static void 2987 lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba, 2988 struct lpfc_sli_ring *pring, uint32_t mask) 2989 { 2990 struct lpfc_iocbq *irspiocbq; 2991 struct hbq_dmabuf *dmabuf; 2992 struct lpfc_cq_event *cq_event; 2993 unsigned long iflag; 2994 2995 spin_lock_irqsave(&phba->hbalock, iflag); 2996 phba->hba_flag &= ~HBA_SP_QUEUE_EVT; 2997 spin_unlock_irqrestore(&phba->hbalock, iflag); 2998 while (!list_empty(&phba->sli4_hba.sp_queue_event)) { 2999 /* Get the response iocb from the head of work queue */ 3000 spin_lock_irqsave(&phba->hbalock, iflag); 3001 list_remove_head(&phba->sli4_hba.sp_queue_event, 3002 cq_event, struct lpfc_cq_event, list); 3003 spin_unlock_irqrestore(&phba->hbalock, iflag); 3004 3005 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) { 3006 case CQE_CODE_COMPL_WQE: 3007 irspiocbq = container_of(cq_event, struct lpfc_iocbq, 3008 cq_event); 3009 /* Translate ELS WCQE to response IOCBQ */ 3010 irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba, 3011 irspiocbq); 3012 if (irspiocbq) 3013 lpfc_sli_sp_handle_rspiocb(phba, pring, 3014 irspiocbq); 3015 break; 3016 case CQE_CODE_RECEIVE: 3017 dmabuf = container_of(cq_event, struct hbq_dmabuf, 3018 cq_event); 3019 lpfc_sli4_handle_received_buffer(phba, dmabuf); 3020 break; 3021 default: 3022 break; 3023 } 3024 } 3025 } 3026 3027 /** 3028 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring 3029 * @phba: Pointer to HBA context object. 3030 * @pring: Pointer to driver SLI ring object. 3031 * 3032 * This function aborts all iocbs in the given ring and frees all the iocb 3033 * objects in txq. This function issues an abort iocb for all the iocb commands 3034 * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before 3035 * the return of this function. The caller is not required to hold any locks. 3036 **/ 3037 void 3038 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 3039 { 3040 LIST_HEAD(completions); 3041 struct lpfc_iocbq *iocb, *next_iocb; 3042 3043 if (pring->ringno == LPFC_ELS_RING) { 3044 lpfc_fabric_abort_hba(phba); 3045 } 3046 3047 /* Error everything on txq and txcmplq 3048 * First do the txq. 3049 */ 3050 spin_lock_irq(&phba->hbalock); 3051 list_splice_init(&pring->txq, &completions); 3052 pring->txq_cnt = 0; 3053 3054 /* Next issue ABTS for everything on the txcmplq */ 3055 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) 3056 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 3057 3058 spin_unlock_irq(&phba->hbalock); 3059 3060 /* Cancel all the IOCBs from the completions list */ 3061 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 3062 IOERR_SLI_ABORTED); 3063 } 3064 3065 /** 3066 * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring 3067 * @phba: Pointer to HBA context object. 3068 * 3069 * This function flushes all iocbs in the fcp ring and frees all the iocb 3070 * objects in txq and txcmplq. This function will not issue abort iocbs 3071 * for all the iocb commands in txcmplq, they will just be returned with 3072 * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI 3073 * slot has been permanently disabled. 
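 *
 * Contrast with lpfc_sli_abort_iocb_ring() above: that routine issues
 * ABTS for the commands still on the txcmplq and completes the txq
 * entries with IOERR_SLI_ABORTED, while this one simply cancels
 * everything with IOERR_SLI_DOWN because the device can no longer
 * respond.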
 **/
void
lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
{
	LIST_HEAD(txq);
	LIST_HEAD(txcmplq);
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;

	/* Currently, only one fcp ring */
	pring = &psli->ring[psli->fcp_ring];

	spin_lock_irq(&phba->hbalock);
	/* Retrieve everything on txq */
	list_splice_init(&pring->txq, &txq);
	pring->txq_cnt = 0;

	/* Retrieve everything on the txcmplq */
	list_splice_init(&pring->txcmplq, &txcmplq);
	pring->txcmplq_cnt = 0;
	spin_unlock_irq(&phba->hbalock);

	/* Flush the txq */
	lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_DOWN);

	/* Flush the txcmpq */
	lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_DOWN);
}

/**
 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
 * @phba: Pointer to HBA context object.
 * @mask: Bit mask to be checked.
 *
 * This function reads the host status register and compares
 * with the provided bit mask to check if HBA completed
 * the restart. This function will wait in a loop for the
 * HBA to complete restart. If the HBA does not restart within
 * 15 iterations, the function will reset the HBA again. The
 * function returns 1 when the HBA fails to restart; otherwise it
 * returns zero.
 **/
static int
lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
{
	uint32_t status;
	int i = 0;
	int retval = 0;

	/* Read the HBA Host Status Register */
	status = readl(phba->HSregaddr);

	/*
	 * Check status register every 100ms for 5 retries, then every
	 * 500ms for 5, then every 2.5 sec for 5, then reset board and
	 * every 2.5 sec for 4.
	 * Break out of the loop if errors occurred during init.
	 */
	while (((status & mask) != mask) &&
	       !(status & HS_FFERM) &&
	       i++ < 20) {

		if (i <= 5)
			msleep(10);
		else if (i <= 10)
			msleep(500);
		else
			msleep(2500);

		if (i == 15) {
			/* Do post */
			phba->pport->port_state = LPFC_VPORT_UNKNOWN;
			lpfc_sli_brdrestart(phba);
		}
		/* Read the HBA Host Status Register */
		status = readl(phba->HSregaddr);
	}

	/* Check to see if any errors occurred during init */
	if ((status & HS_FFERM) || (i >= 20)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2751 Adapter failed to restart, "
				"status reg x%x, FW Data: A8 x%x AC x%x\n",
				status,
				readl(phba->MBslimaddr + 0xa8),
				readl(phba->MBslimaddr + 0xac));
		phba->link_state = LPFC_HBA_ERROR;
		retval = 1;
	}

	return retval;
}

/**
 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
 * @phba: Pointer to HBA context object.
 * @mask: Bit mask to be checked.
 *
 * This function checks the host status register to check if the HBA is
 * ready. This function will wait in a loop for the HBA to be ready.
 * If the HBA is not ready, the function will reset the HBA PCI
 * function again. The function returns 1 when the HBA fails to become
 * ready; otherwise it returns zero.
3179 **/ 3180 static int 3181 lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask) 3182 { 3183 uint32_t status; 3184 int retval = 0; 3185 3186 /* Read the HBA Host Status Register */ 3187 status = lpfc_sli4_post_status_check(phba); 3188 3189 if (status) { 3190 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 3191 lpfc_sli_brdrestart(phba); 3192 status = lpfc_sli4_post_status_check(phba); 3193 } 3194 3195 /* Check to see if any errors occurred during init */ 3196 if (status) { 3197 phba->link_state = LPFC_HBA_ERROR; 3198 retval = 1; 3199 } else 3200 phba->sli4_hba.intr_enable = 0; 3201 3202 return retval; 3203 } 3204 3205 /** 3206 * lpfc_sli_brdready - Wrapper func for checking the hba readyness 3207 * @phba: Pointer to HBA context object. 3208 * @mask: Bit mask to be checked. 3209 * 3210 * This routine wraps the actual SLI3 or SLI4 hba readyness check routine 3211 * from the API jump table function pointer from the lpfc_hba struct. 3212 **/ 3213 int 3214 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask) 3215 { 3216 return phba->lpfc_sli_brdready(phba, mask); 3217 } 3218 3219 #define BARRIER_TEST_PATTERN (0xdeadbeef) 3220 3221 /** 3222 * lpfc_reset_barrier - Make HBA ready for HBA reset 3223 * @phba: Pointer to HBA context object. 3224 * 3225 * This function is called before resetting an HBA. This 3226 * function requests HBA to quiesce DMAs before a reset. 3227 **/ 3228 void lpfc_reset_barrier(struct lpfc_hba *phba) 3229 { 3230 uint32_t __iomem *resp_buf; 3231 uint32_t __iomem *mbox_buf; 3232 volatile uint32_t mbox; 3233 uint32_t hc_copy; 3234 int i; 3235 uint8_t hdrtype; 3236 3237 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype); 3238 if (hdrtype != 0x80 || 3239 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID && 3240 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID)) 3241 return; 3242 3243 /* 3244 * Tell the other part of the chip to suspend temporarily all 3245 * its DMA activity. 
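 *
 * Illustrative context (see lpfc_sli_brdrestart_s3() later in this
 * file): the barrier is raised immediately before the MBX_RESTART word
 * is written to SLIM, roughly
 *
 *	lpfc_reset_barrier(phba);
 *	writel(*(uint32_t *) mb, phba->MBslimaddr);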
3246 */ 3247 resp_buf = phba->MBslimaddr; 3248 3249 /* Disable the error attention */ 3250 hc_copy = readl(phba->HCregaddr); 3251 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr); 3252 readl(phba->HCregaddr); /* flush */ 3253 phba->link_flag |= LS_IGNORE_ERATT; 3254 3255 if (readl(phba->HAregaddr) & HA_ERATT) { 3256 /* Clear Chip error bit */ 3257 writel(HA_ERATT, phba->HAregaddr); 3258 phba->pport->stopped = 1; 3259 } 3260 3261 mbox = 0; 3262 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD; 3263 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP; 3264 3265 writel(BARRIER_TEST_PATTERN, (resp_buf + 1)); 3266 mbox_buf = phba->MBslimaddr; 3267 writel(mbox, mbox_buf); 3268 3269 for (i = 0; 3270 readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN) && i < 50; i++) 3271 mdelay(1); 3272 3273 if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) { 3274 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE || 3275 phba->pport->stopped) 3276 goto restore_hc; 3277 else 3278 goto clear_errat; 3279 } 3280 3281 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST; 3282 for (i = 0; readl(resp_buf) != mbox && i < 500; i++) 3283 mdelay(1); 3284 3285 clear_errat: 3286 3287 while (!(readl(phba->HAregaddr) & HA_ERATT) && ++i < 500) 3288 mdelay(1); 3289 3290 if (readl(phba->HAregaddr) & HA_ERATT) { 3291 writel(HA_ERATT, phba->HAregaddr); 3292 phba->pport->stopped = 1; 3293 } 3294 3295 restore_hc: 3296 phba->link_flag &= ~LS_IGNORE_ERATT; 3297 writel(hc_copy, phba->HCregaddr); 3298 readl(phba->HCregaddr); /* flush */ 3299 } 3300 3301 /** 3302 * lpfc_sli_brdkill - Issue a kill_board mailbox command 3303 * @phba: Pointer to HBA context object. 3304 * 3305 * This function issues a kill_board mailbox command and waits for 3306 * the error attention interrupt. This function is called for stopping 3307 * the firmware processing. The caller is not required to hold any 3308 * locks. This function calls lpfc_hba_down_post function to free 3309 * any pending commands after the kill. The function will return 1 when it 3310 * fails to kill the board else will return 0. 
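 *
 * Return-value handling, as a hypothetical sketch only (no caller is
 * shown in this file); a nonzero return means the board never raised
 * the expected error attention:
 *
 *	if (lpfc_sli_brdkill(phba))
 *		return -EIO;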
3311 **/ 3312 int 3313 lpfc_sli_brdkill(struct lpfc_hba *phba) 3314 { 3315 struct lpfc_sli *psli; 3316 LPFC_MBOXQ_t *pmb; 3317 uint32_t status; 3318 uint32_t ha_copy; 3319 int retval; 3320 int i = 0; 3321 3322 psli = &phba->sli; 3323 3324 /* Kill HBA */ 3325 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3326 "0329 Kill HBA Data: x%x x%x\n", 3327 phba->pport->port_state, psli->sli_flag); 3328 3329 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3330 if (!pmb) 3331 return 1; 3332 3333 /* Disable the error attention */ 3334 spin_lock_irq(&phba->hbalock); 3335 status = readl(phba->HCregaddr); 3336 status &= ~HC_ERINT_ENA; 3337 writel(status, phba->HCregaddr); 3338 readl(phba->HCregaddr); /* flush */ 3339 phba->link_flag |= LS_IGNORE_ERATT; 3340 spin_unlock_irq(&phba->hbalock); 3341 3342 lpfc_kill_board(phba, pmb); 3343 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 3344 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 3345 3346 if (retval != MBX_SUCCESS) { 3347 if (retval != MBX_BUSY) 3348 mempool_free(pmb, phba->mbox_mem_pool); 3349 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3350 "2752 KILL_BOARD command failed retval %d\n", 3351 retval); 3352 spin_lock_irq(&phba->hbalock); 3353 phba->link_flag &= ~LS_IGNORE_ERATT; 3354 spin_unlock_irq(&phba->hbalock); 3355 return 1; 3356 } 3357 3358 spin_lock_irq(&phba->hbalock); 3359 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 3360 spin_unlock_irq(&phba->hbalock); 3361 3362 mempool_free(pmb, phba->mbox_mem_pool); 3363 3364 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error 3365 * attention every 100ms for 3 seconds. If we don't get ERATT after 3366 * 3 seconds we still set HBA_ERROR state because the status of the 3367 * board is now undefined. 3368 */ 3369 ha_copy = readl(phba->HAregaddr); 3370 3371 while ((i++ < 30) && !(ha_copy & HA_ERATT)) { 3372 mdelay(100); 3373 ha_copy = readl(phba->HAregaddr); 3374 } 3375 3376 del_timer_sync(&psli->mbox_tmo); 3377 if (ha_copy & HA_ERATT) { 3378 writel(HA_ERATT, phba->HAregaddr); 3379 phba->pport->stopped = 1; 3380 } 3381 spin_lock_irq(&phba->hbalock); 3382 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 3383 psli->mbox_active = NULL; 3384 phba->link_flag &= ~LS_IGNORE_ERATT; 3385 spin_unlock_irq(&phba->hbalock); 3386 3387 lpfc_hba_down_post(phba); 3388 phba->link_state = LPFC_HBA_ERROR; 3389 3390 return ha_copy & HA_ERATT ? 0 : 1; 3391 } 3392 3393 /** 3394 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA 3395 * @phba: Pointer to HBA context object. 3396 * 3397 * This function resets the HBA by writing HC_INITFF to the control 3398 * register. After the HBA resets, this function resets all the iocb ring 3399 * indices. This function disables PCI layer parity checking during 3400 * the reset. 3401 * This function returns 0 always. 3402 * The caller is not required to hold any locks. 
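 *
 * Illustrative context: lpfc_sli_brdrestart_s3() below writes the
 * MBX_RESTART word to SLIM and then invokes this routine, roughly
 *
 *	writel(*(uint32_t *) mb, to_slim);
 *	readl(to_slim);
 *	lpfc_sli_brdreset(phba);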
 **/
int
lpfc_sli_brdreset(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	uint16_t cfg_value;
	int i;

	psli = &phba->sli;

	/* Reset HBA */
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0325 Reset HBA Data: x%x x%x\n",
			phba->pport->port_state, psli->sli_flag);

	/* perform board reset */
	phba->fc_eventTag = 0;
	phba->link_events = 0;
	phba->pport->fc_myDID = 0;
	phba->pport->fc_prevDID = 0;

	/* Turn off parity checking and serr during the physical reset */
	pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
	pci_write_config_word(phba->pcidev, PCI_COMMAND,
			      (cfg_value &
			       ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));

	psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);

	/* Now toggle INITFF bit in the Host Control Register */
	writel(HC_INITFF, phba->HCregaddr);
	mdelay(1);
	readl(phba->HCregaddr); /* flush */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Restore PCI cmd register */
	pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);

	/* Initialize relevant SLI info */
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		pring->flag = 0;
		pring->rspidx = 0;
		pring->next_cmdidx = 0;
		pring->local_getidx = 0;
		pring->cmdidx = 0;
		pring->missbufcnt = 0;
	}

	phba->link_state = LPFC_WARM_START;
	return 0;
}

/**
 * lpfc_sli4_brdreset - Reset a sli-4 HBA
 * @phba: Pointer to HBA context object.
 *
 * This function resets a SLI4 HBA. This function disables PCI layer parity
 * checking while resetting the device. The caller is not required to hold
 * any locks.
 *
 * This function returns 0 always.
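 *
 * Illustrative context: lpfc_sli_brdrestart_s4() below invokes this
 * routine as its entire board-reset step, i.e.
 *
 *	lpfc_sli4_brdreset(phba);
 *
 * before clearing pport->stopped and moving link_state to
 * LPFC_INIT_START.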
3467 **/ 3468 int 3469 lpfc_sli4_brdreset(struct lpfc_hba *phba) 3470 { 3471 struct lpfc_sli *psli = &phba->sli; 3472 uint16_t cfg_value; 3473 uint8_t qindx; 3474 3475 /* Reset HBA */ 3476 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3477 "0295 Reset HBA Data: x%x x%x\n", 3478 phba->pport->port_state, psli->sli_flag); 3479 3480 /* perform board reset */ 3481 phba->fc_eventTag = 0; 3482 phba->link_events = 0; 3483 phba->pport->fc_myDID = 0; 3484 phba->pport->fc_prevDID = 0; 3485 3486 /* Turn off parity checking and serr during the physical reset */ 3487 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); 3488 pci_write_config_word(phba->pcidev, PCI_COMMAND, 3489 (cfg_value & 3490 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); 3491 3492 spin_lock_irq(&phba->hbalock); 3493 psli->sli_flag &= ~(LPFC_PROCESS_LA); 3494 phba->fcf.fcf_flag = 0; 3495 /* Clean up the child queue list for the CQs */ 3496 list_del_init(&phba->sli4_hba.mbx_wq->list); 3497 list_del_init(&phba->sli4_hba.els_wq->list); 3498 list_del_init(&phba->sli4_hba.hdr_rq->list); 3499 list_del_init(&phba->sli4_hba.dat_rq->list); 3500 list_del_init(&phba->sli4_hba.mbx_cq->list); 3501 list_del_init(&phba->sli4_hba.els_cq->list); 3502 for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++) 3503 list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list); 3504 for (qindx = 0; qindx < phba->cfg_fcp_eq_count; qindx++) 3505 list_del_init(&phba->sli4_hba.fcp_cq[qindx]->list); 3506 spin_unlock_irq(&phba->hbalock); 3507 3508 /* Now physically reset the device */ 3509 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3510 "0389 Performing PCI function reset!\n"); 3511 /* Perform FCoE PCI function reset */ 3512 lpfc_pci_function_reset(phba); 3513 3514 return 0; 3515 } 3516 3517 /** 3518 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba 3519 * @phba: Pointer to HBA context object. 3520 * 3521 * This function is called in the SLI initialization code path to 3522 * restart the HBA. The caller is not required to hold any lock. 3523 * This function writes MBX_RESTART mailbox command to the SLIM and 3524 * resets the HBA. At the end of the function, it calls lpfc_hba_down_post 3525 * function to free any pending commands. The function enables 3526 * POST only during the first initialization. The function returns zero. 3527 * The function does not guarantee completion of MBX_RESTART mailbox 3528 * command before the return of this function. 
3529 **/ 3530 static int 3531 lpfc_sli_brdrestart_s3(struct lpfc_hba *phba) 3532 { 3533 MAILBOX_t *mb; 3534 struct lpfc_sli *psli; 3535 volatile uint32_t word0; 3536 void __iomem *to_slim; 3537 uint32_t hba_aer_enabled; 3538 3539 spin_lock_irq(&phba->hbalock); 3540 3541 /* Take PCIe device Advanced Error Reporting (AER) state */ 3542 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED; 3543 3544 psli = &phba->sli; 3545 3546 /* Restart HBA */ 3547 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3548 "0337 Restart HBA Data: x%x x%x\n", 3549 phba->pport->port_state, psli->sli_flag); 3550 3551 word0 = 0; 3552 mb = (MAILBOX_t *) &word0; 3553 mb->mbxCommand = MBX_RESTART; 3554 mb->mbxHc = 1; 3555 3556 lpfc_reset_barrier(phba); 3557 3558 to_slim = phba->MBslimaddr; 3559 writel(*(uint32_t *) mb, to_slim); 3560 readl(to_slim); /* flush */ 3561 3562 /* Only skip post after fc_ffinit is completed */ 3563 if (phba->pport->port_state) 3564 word0 = 1; /* This is really setting up word1 */ 3565 else 3566 word0 = 0; /* This is really setting up word1 */ 3567 to_slim = phba->MBslimaddr + sizeof (uint32_t); 3568 writel(*(uint32_t *) mb, to_slim); 3569 readl(to_slim); /* flush */ 3570 3571 lpfc_sli_brdreset(phba); 3572 phba->pport->stopped = 0; 3573 phba->link_state = LPFC_INIT_START; 3574 phba->hba_flag = 0; 3575 spin_unlock_irq(&phba->hbalock); 3576 3577 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); 3578 psli->stats_start = get_seconds(); 3579 3580 /* Give the INITFF and Post time to settle. */ 3581 mdelay(100); 3582 3583 /* Reset HBA AER if it was enabled, note hba_flag was reset above */ 3584 if (hba_aer_enabled) 3585 pci_disable_pcie_error_reporting(phba->pcidev); 3586 3587 lpfc_hba_down_post(phba); 3588 3589 return 0; 3590 } 3591 3592 /** 3593 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba 3594 * @phba: Pointer to HBA context object. 3595 * 3596 * This function is called in the SLI initialization code path to restart 3597 * a SLI4 HBA. The caller is not required to hold any lock. 3598 * At the end of the function, it calls lpfc_hba_down_post function to 3599 * free any pending commands. 3600 **/ 3601 static int 3602 lpfc_sli_brdrestart_s4(struct lpfc_hba *phba) 3603 { 3604 struct lpfc_sli *psli = &phba->sli; 3605 uint32_t hba_aer_enabled; 3606 3607 /* Restart HBA */ 3608 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3609 "0296 Restart HBA Data: x%x x%x\n", 3610 phba->pport->port_state, psli->sli_flag); 3611 3612 /* Take PCIe device Advanced Error Reporting (AER) state */ 3613 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED; 3614 3615 lpfc_sli4_brdreset(phba); 3616 3617 spin_lock_irq(&phba->hbalock); 3618 phba->pport->stopped = 0; 3619 phba->link_state = LPFC_INIT_START; 3620 phba->hba_flag = 0; 3621 spin_unlock_irq(&phba->hbalock); 3622 3623 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); 3624 psli->stats_start = get_seconds(); 3625 3626 /* Reset HBA AER if it was enabled, note hba_flag was reset above */ 3627 if (hba_aer_enabled) 3628 pci_disable_pcie_error_reporting(phba->pcidev); 3629 3630 lpfc_hba_down_post(phba); 3631 3632 return 0; 3633 } 3634 3635 /** 3636 * lpfc_sli_brdrestart - Wrapper func for restarting hba 3637 * @phba: Pointer to HBA context object. 3638 * 3639 * This routine wraps the actual SLI3 or SLI4 hba restart routine from the 3640 * API jump table function pointer from the lpfc_hba struct. 
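* It returns whatever the SLI-3 or SLI-4 specific restart routine returns.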
3641 **/ 3642 int 3643 lpfc_sli_brdrestart(struct lpfc_hba *phba) 3644 { 3645 return phba->lpfc_sli_brdrestart(phba); 3646 } 3647 3648 /** 3649 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart 3650 * @phba: Pointer to HBA context object. 3651 * 3652 * This function is called after a HBA restart to wait for successful 3653 * restart of the HBA. Successful restart of the HBA is indicated by 3654 * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 150 3655 * iterations, the function will restart the HBA again. The function returns 3656 * zero if the HBA successfully restarted, else it returns a negative error code. 3657 **/ 3658 static int 3659 lpfc_sli_chipset_init(struct lpfc_hba *phba) 3660 { 3661 uint32_t status, i = 0; 3662 3663 /* Read the HBA Host Status Register */ 3664 status = readl(phba->HSregaddr); 3665 3666 /* Check status register to see what current state is */ 3667 i = 0; 3668 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) { 3669 3670 /* Check every 10ms for 10 retries, then every 100ms for 90 3671 * retries, then every 1 sec for 50 retries, for a total of 3672 * ~60 seconds before resetting the board again and checking 3673 * every 1 sec for another 50 retries. The up-to-60-second wait 3674 * for board ready is required for the Falcon FIPS zeroization 3675 * to complete; any board reset in between would restart the 3676 * zeroization and further delay board readiness. 3677 */ 3678 if (i++ >= 200) { 3679 /* Adapter failed to init, timeout, status reg 3680 <status> */ 3681 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3682 "0436 Adapter failed to init, " 3683 "timeout, status reg x%x, " 3684 "FW Data: A8 x%x AC x%x\n", status, 3685 readl(phba->MBslimaddr + 0xa8), 3686 readl(phba->MBslimaddr + 0xac)); 3687 phba->link_state = LPFC_HBA_ERROR; 3688 return -ETIMEDOUT; 3689 } 3690 3691 /* Check to see if any errors occurred during init */ 3692 if (status & HS_FFERM) { 3693 /* ERROR: During chipset initialization */ 3694 /* Adapter failed to init, chipset, status reg 3695 <status> */ 3696 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3697 "0437 Adapter failed to init, " 3698 "chipset, status reg x%x, " 3699 "FW Data: A8 x%x AC x%x\n", status, 3700 readl(phba->MBslimaddr + 0xa8), 3701 readl(phba->MBslimaddr + 0xac)); 3702 phba->link_state = LPFC_HBA_ERROR; 3703 return -EIO; 3704 } 3705 3706 if (i <= 10) 3707 msleep(10); 3708 else if (i <= 100) 3709 msleep(100); 3710 else 3711 msleep(1000); 3712 3713 if (i == 150) { 3714 /* Do post */ 3715 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 3716 lpfc_sli_brdrestart(phba); 3717 } 3718 /* Read the HBA Host Status Register */ 3719 status = readl(phba->HSregaddr); 3720 } 3721 3722 /* Check to see if any errors occurred during init */ 3723 if (status & HS_FFERM) { 3724 /* ERROR: During chipset initialization */ 3725 /* Adapter failed to init, chipset, status reg <status> */ 3726 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3727 "0438 Adapter failed to init, chipset, " 3728 "status reg x%x, " 3729 "FW Data: A8 x%x AC x%x\n", status, 3730 readl(phba->MBslimaddr + 0xa8), 3731 readl(phba->MBslimaddr + 0xac)); 3732 phba->link_state = LPFC_HBA_ERROR; 3733 return -EIO; 3734 } 3735 3736 /* Clear all interrupt enable conditions */ 3737 writel(0, phba->HCregaddr); 3738 readl(phba->HCregaddr); /* flush */ 3739 3740 /* setup host attn register */ 3741 writel(0xffffffff, phba->HAregaddr); 3742 readl(phba->HAregaddr); /* flush */ 3743 return 0; 3744 } 3745 3746 /** 3747 * lpfc_sli_hbq_count - Get the number of HBQs to be
configured 3748 * 3749 * This function calculates and returns the number of HBQs required to be 3750 * configured. 3751 **/ 3752 int 3753 lpfc_sli_hbq_count(void) 3754 { 3755 return ARRAY_SIZE(lpfc_hbq_defs); 3756 } 3757 3758 /** 3759 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries 3760 * 3761 * This function adds the number of hbq entries in every HBQ to get 3762 * the total number of hbq entries required for the HBA and returns 3763 * the total count. 3764 **/ 3765 static int 3766 lpfc_sli_hbq_entry_count(void) 3767 { 3768 int hbq_count = lpfc_sli_hbq_count(); 3769 int count = 0; 3770 int i; 3771 3772 for (i = 0; i < hbq_count; ++i) 3773 count += lpfc_hbq_defs[i]->entry_count; 3774 return count; 3775 } 3776 3777 /** 3778 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries 3779 * 3780 * This function calculates amount of memory required for all hbq entries 3781 * to be configured and returns the total memory required. 3782 **/ 3783 int 3784 lpfc_sli_hbq_size(void) 3785 { 3786 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry); 3787 } 3788 3789 /** 3790 * lpfc_sli_hbq_setup - configure and initialize HBQs 3791 * @phba: Pointer to HBA context object. 3792 * 3793 * This function is called during the SLI initialization to configure 3794 * all the HBQs and post buffers to the HBQ. The caller is not 3795 * required to hold any locks. This function will return zero if successful 3796 * else it will return negative error code. 3797 **/ 3798 static int 3799 lpfc_sli_hbq_setup(struct lpfc_hba *phba) 3800 { 3801 int hbq_count = lpfc_sli_hbq_count(); 3802 LPFC_MBOXQ_t *pmb; 3803 MAILBOX_t *pmbox; 3804 uint32_t hbqno; 3805 uint32_t hbq_entry_index; 3806 3807 /* Get a Mailbox buffer to setup mailbox 3808 * commands for HBA initialization 3809 */ 3810 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3811 3812 if (!pmb) 3813 return -ENOMEM; 3814 3815 pmbox = &pmb->u.mb; 3816 3817 /* Initialize the struct lpfc_sli_hbq structure for each hbq */ 3818 phba->link_state = LPFC_INIT_MBX_CMDS; 3819 phba->hbq_in_use = 1; 3820 3821 hbq_entry_index = 0; 3822 for (hbqno = 0; hbqno < hbq_count; ++hbqno) { 3823 phba->hbqs[hbqno].next_hbqPutIdx = 0; 3824 phba->hbqs[hbqno].hbqPutIdx = 0; 3825 phba->hbqs[hbqno].local_hbqGetIdx = 0; 3826 phba->hbqs[hbqno].entry_count = 3827 lpfc_hbq_defs[hbqno]->entry_count; 3828 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno], 3829 hbq_entry_index, pmb); 3830 hbq_entry_index += phba->hbqs[hbqno].entry_count; 3831 3832 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 3833 /* Adapter failed to init, mbxCmd <cmd> CFG_RING, 3834 mbxStatus <status>, ring <num> */ 3835 3836 lpfc_printf_log(phba, KERN_ERR, 3837 LOG_SLI | LOG_VPORT, 3838 "1805 Adapter failed to init. " 3839 "Data: x%x x%x x%x\n", 3840 pmbox->mbxCommand, 3841 pmbox->mbxStatus, hbqno); 3842 3843 phba->link_state = LPFC_HBA_ERROR; 3844 mempool_free(pmb, phba->mbox_mem_pool); 3845 return -ENXIO; 3846 } 3847 } 3848 phba->hbq_count = hbq_count; 3849 3850 mempool_free(pmb, phba->mbox_mem_pool); 3851 3852 /* Initially populate or replenish the HBQs */ 3853 for (hbqno = 0; hbqno < hbq_count; ++hbqno) 3854 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno); 3855 return 0; 3856 } 3857 3858 /** 3859 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA 3860 * @phba: Pointer to HBA context object. 3861 * 3862 * This function is called during the SLI initialization to configure 3863 * all the HBQs and post buffers to the HBQ. 
The caller is not 3864 * required to hold any locks. This function will return zero if successful, 3865 * else it will return a negative error code. 3866 **/ 3867 static int 3868 lpfc_sli4_rb_setup(struct lpfc_hba *phba) 3869 { 3870 phba->hbq_in_use = 1; 3871 phba->hbqs[0].entry_count = lpfc_hbq_defs[0]->entry_count; 3872 phba->hbq_count = 1; 3873 /* Initially populate or replenish the HBQs */ 3874 lpfc_sli_hbqbuf_init_hbqs(phba, 0); 3875 return 0; 3876 } 3877 3878 /** 3879 * lpfc_sli_config_port - Issue config port mailbox command 3880 * @phba: Pointer to HBA context object. 3881 * @sli_mode: sli mode - 2/3 3882 * 3883 * This function is called by the SLI initialization code path 3884 * to issue the config_port mailbox command. This function restarts the 3885 * HBA firmware and issues a config_port mailbox command to configure 3886 * the SLI interface in the SLI mode specified by the sli_mode 3887 * variable. The caller is not required to hold any locks. 3888 * The function returns 0 if successful, else returns negative error 3889 * code. 3890 **/ 3891 int 3892 lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode) 3893 { 3894 LPFC_MBOXQ_t *pmb; 3895 uint32_t resetcount = 0, rc = 0, done = 0; 3896 3897 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3898 if (!pmb) { 3899 phba->link_state = LPFC_HBA_ERROR; 3900 return -ENOMEM; 3901 } 3902 3903 phba->sli_rev = sli_mode; 3904 while (resetcount < 2 && !done) { 3905 spin_lock_irq(&phba->hbalock); 3906 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE; 3907 spin_unlock_irq(&phba->hbalock); 3908 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 3909 lpfc_sli_brdrestart(phba); 3910 rc = lpfc_sli_chipset_init(phba); 3911 if (rc) 3912 break; 3913 3914 spin_lock_irq(&phba->hbalock); 3915 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 3916 spin_unlock_irq(&phba->hbalock); 3917 resetcount++; 3918 3919 /* Call pre CONFIG_PORT mailbox command initialization. A 3920 * value of 0 means the call was successful. Any other 3921 * nonzero value is a failure, but if ERESTART is returned, 3922 * the driver may reset the HBA and try again.
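* Any other nonzero return code terminates the retry loop.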
3923 */ 3924 rc = lpfc_config_port_prep(phba); 3925 if (rc == -ERESTART) { 3926 phba->link_state = LPFC_LINK_UNKNOWN; 3927 continue; 3928 } else if (rc) 3929 break; 3930 phba->link_state = LPFC_INIT_MBX_CMDS; 3931 lpfc_config_port(phba, pmb); 3932 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 3933 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED | 3934 LPFC_SLI3_HBQ_ENABLED | 3935 LPFC_SLI3_CRP_ENABLED | 3936 LPFC_SLI3_BG_ENABLED | 3937 LPFC_SLI3_DSS_ENABLED); 3938 if (rc != MBX_SUCCESS) { 3939 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3940 "0442 Adapter failed to init, mbxCmd x%x " 3941 "CONFIG_PORT, mbxStatus x%x Data: x%x\n", 3942 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0); 3943 spin_lock_irq(&phba->hbalock); 3944 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE; 3945 spin_unlock_irq(&phba->hbalock); 3946 rc = -ENXIO; 3947 } else { 3948 /* Allow asynchronous mailbox command to go through */ 3949 spin_lock_irq(&phba->hbalock); 3950 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 3951 spin_unlock_irq(&phba->hbalock); 3952 done = 1; 3953 } 3954 } 3955 if (!done) { 3956 rc = -EINVAL; 3957 goto do_prep_failed; 3958 } 3959 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) { 3960 if (!pmb->u.mb.un.varCfgPort.cMA) { 3961 rc = -ENXIO; 3962 goto do_prep_failed; 3963 } 3964 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) { 3965 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED; 3966 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi; 3967 phba->max_vports = (phba->max_vpi > phba->max_vports) ? 3968 phba->max_vpi : phba->max_vports; 3969 3970 } else 3971 phba->max_vpi = 0; 3972 phba->fips_level = 0; 3973 phba->fips_spec_rev = 0; 3974 if (pmb->u.mb.un.varCfgPort.gdss) { 3975 phba->sli3_options |= LPFC_SLI3_DSS_ENABLED; 3976 phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level; 3977 phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev; 3978 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3979 "2850 Security Crypto Active. FIPS x%d " 3980 "(Spec Rev: x%d)", 3981 phba->fips_level, phba->fips_spec_rev); 3982 } 3983 if (pmb->u.mb.un.varCfgPort.sec_err) { 3984 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3985 "2856 Config Port Security Crypto " 3986 "Error: x%x ", 3987 pmb->u.mb.un.varCfgPort.sec_err); 3988 } 3989 if (pmb->u.mb.un.varCfgPort.gerbm) 3990 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED; 3991 if (pmb->u.mb.un.varCfgPort.gcrp) 3992 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED; 3993 3994 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get; 3995 phba->port_gp = phba->mbox->us.s3_pgp.port; 3996 3997 if (phba->cfg_enable_bg) { 3998 if (pmb->u.mb.un.varCfgPort.gbg) 3999 phba->sli3_options |= LPFC_SLI3_BG_ENABLED; 4000 else 4001 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4002 "0443 Adapter did not grant " 4003 "BlockGuard\n"); 4004 } 4005 } else { 4006 phba->hbq_get = NULL; 4007 phba->port_gp = phba->mbox->us.s2.port; 4008 phba->max_vpi = 0; 4009 } 4010 do_prep_failed: 4011 mempool_free(pmb, phba->mbox_mem_pool); 4012 return rc; 4013 } 4014 4015 4016 /** 4017 * lpfc_sli_hba_setup - SLI intialization function 4018 * @phba: Pointer to HBA context object. 4019 * 4020 * This function is the main SLI intialization function. This function 4021 * is called by the HBA intialization code, HBA reset code and HBA 4022 * error attention handler code. Caller is not required to hold any 4023 * locks. This function issues config_port mailbox command to configure 4024 * the SLI, setup iocb rings and HBQ rings. In the end the function 4025 * calls the config_port_post function to issue init_link mailbox 4026 * command and to start the discovery. 
The function will return zero 4027 * if successful, else it will return negative error code. 4028 **/ 4029 int 4030 lpfc_sli_hba_setup(struct lpfc_hba *phba) 4031 { 4032 uint32_t rc; 4033 int mode = 3; 4034 4035 switch (lpfc_sli_mode) { 4036 case 2: 4037 if (phba->cfg_enable_npiv) { 4038 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 4039 "1824 NPIV enabled: Override lpfc_sli_mode " 4040 "parameter (%d) to auto (0).\n", 4041 lpfc_sli_mode); 4042 break; 4043 } 4044 mode = 2; 4045 break; 4046 case 0: 4047 case 3: 4048 break; 4049 default: 4050 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 4051 "1819 Unrecognized lpfc_sli_mode " 4052 "parameter: %d.\n", lpfc_sli_mode); 4053 4054 break; 4055 } 4056 4057 rc = lpfc_sli_config_port(phba, mode); 4058 4059 if (rc && lpfc_sli_mode == 3) 4060 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 4061 "1820 Unable to select SLI-3. " 4062 "Not supported by adapter.\n"); 4063 if (rc && mode != 2) 4064 rc = lpfc_sli_config_port(phba, 2); 4065 if (rc) 4066 goto lpfc_sli_hba_setup_error; 4067 4068 /* Enable PCIe device Advanced Error Reporting (AER) if configured */ 4069 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) { 4070 rc = pci_enable_pcie_error_reporting(phba->pcidev); 4071 if (!rc) { 4072 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4073 "2709 This device supports " 4074 "Advanced Error Reporting (AER)\n"); 4075 spin_lock_irq(&phba->hbalock); 4076 phba->hba_flag |= HBA_AER_ENABLED; 4077 spin_unlock_irq(&phba->hbalock); 4078 } else { 4079 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4080 "2708 This device does not support " 4081 "Advanced Error Reporting (AER)\n"); 4082 phba->cfg_aer_support = 0; 4083 } 4084 } 4085 4086 if (phba->sli_rev == 3) { 4087 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE; 4088 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE; 4089 } else { 4090 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE; 4091 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE; 4092 phba->sli3_options = 0; 4093 } 4094 4095 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4096 "0444 Firmware in SLI %x mode. Max_vpi %d\n", 4097 phba->sli_rev, phba->max_vpi); 4098 rc = lpfc_sli_ring_map(phba); 4099 4100 if (rc) 4101 goto lpfc_sli_hba_setup_error; 4102 4103 /* Init HBQs */ 4104 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 4105 rc = lpfc_sli_hbq_setup(phba); 4106 if (rc) 4107 goto lpfc_sli_hba_setup_error; 4108 } 4109 spin_lock_irq(&phba->hbalock); 4110 phba->sli.sli_flag |= LPFC_PROCESS_LA; 4111 spin_unlock_irq(&phba->hbalock); 4112 4113 rc = lpfc_config_port_post(phba); 4114 if (rc) 4115 goto lpfc_sli_hba_setup_error; 4116 4117 return rc; 4118 4119 lpfc_sli_hba_setup_error: 4120 phba->link_state = LPFC_HBA_ERROR; 4121 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4122 "0445 Firmware initialization failed\n"); 4123 return rc; 4124 } 4125 4126 /** 4127 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region 4128 * @phba: Pointer to HBA context object. 4129 * @mboxq: mailbox pointer. 4130 * This function issue a dump mailbox command to read config region 4131 * 23 and parse the records in the region and populate driver 4132 * data structure. 
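* Returns 0 on success, -ENOMEM if the dump mailbox cannot be set up, or
* -EIO if the mailbox command fails or reports more data than the region
* 23 size.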
4133 **/ 4134 static int 4135 lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba, 4136 LPFC_MBOXQ_t *mboxq) 4137 { 4138 struct lpfc_dmabuf *mp; 4139 struct lpfc_mqe *mqe; 4140 uint32_t data_length; 4141 int rc; 4142 4143 /* Program the default value of vlan_id and fc_map */ 4144 phba->valid_vlan = 0; 4145 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; 4146 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; 4147 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; 4148 4149 mqe = &mboxq->u.mqe; 4150 if (lpfc_dump_fcoe_param(phba, mboxq)) 4151 return -ENOMEM; 4152 4153 mp = (struct lpfc_dmabuf *) mboxq->context1; 4154 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4155 4156 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 4157 "(%d):2571 Mailbox cmd x%x Status x%x " 4158 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x " 4159 "x%x x%x x%x x%x x%x x%x x%x x%x x%x " 4160 "CQ: x%x x%x x%x x%x\n", 4161 mboxq->vport ? mboxq->vport->vpi : 0, 4162 bf_get(lpfc_mqe_command, mqe), 4163 bf_get(lpfc_mqe_status, mqe), 4164 mqe->un.mb_words[0], mqe->un.mb_words[1], 4165 mqe->un.mb_words[2], mqe->un.mb_words[3], 4166 mqe->un.mb_words[4], mqe->un.mb_words[5], 4167 mqe->un.mb_words[6], mqe->un.mb_words[7], 4168 mqe->un.mb_words[8], mqe->un.mb_words[9], 4169 mqe->un.mb_words[10], mqe->un.mb_words[11], 4170 mqe->un.mb_words[12], mqe->un.mb_words[13], 4171 mqe->un.mb_words[14], mqe->un.mb_words[15], 4172 mqe->un.mb_words[16], mqe->un.mb_words[50], 4173 mboxq->mcqe.word0, 4174 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, 4175 mboxq->mcqe.trailer); 4176 4177 if (rc) { 4178 lpfc_mbuf_free(phba, mp->virt, mp->phys); 4179 kfree(mp); 4180 return -EIO; 4181 } 4182 data_length = mqe->un.mb_words[5]; 4183 if (data_length > DMP_RGN23_SIZE) { 4184 lpfc_mbuf_free(phba, mp->virt, mp->phys); 4185 kfree(mp); 4186 return -EIO; 4187 } 4188 4189 lpfc_parse_fcoe_conf(phba, mp->virt, data_length); 4190 lpfc_mbuf_free(phba, mp->virt, mp->phys); 4191 kfree(mp); 4192 return 0; 4193 } 4194 4195 /** 4196 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data 4197 * @phba: pointer to lpfc hba data structure. 4198 * @mboxq: pointer to the LPFC_MBOXQ_t structure. 4199 * @vpd: pointer to the memory to hold resulting port vpd data. 4200 * @vpd_size: On input, the number of bytes allocated to @vpd. 4201 * On output, the number of data bytes in @vpd. 4202 * 4203 * This routine executes a READ_REV SLI4 mailbox command. In 4204 * addition, this routine gets the port vpd data. 4205 * 4206 * Return codes 4207 * 0 - successful 4208 * -ENOMEM - could not allocated memory. 4209 **/ 4210 static int 4211 lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, 4212 uint8_t *vpd, uint32_t *vpd_size) 4213 { 4214 int rc = 0; 4215 uint32_t dma_size; 4216 struct lpfc_dmabuf *dmabuf; 4217 struct lpfc_mqe *mqe; 4218 4219 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 4220 if (!dmabuf) 4221 return -ENOMEM; 4222 4223 /* 4224 * Get a DMA buffer for the vpd data resulting from the READ_REV 4225 * mailbox command. 4226 */ 4227 dma_size = *vpd_size; 4228 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 4229 dma_size, 4230 &dmabuf->phys, 4231 GFP_KERNEL); 4232 if (!dmabuf->virt) { 4233 kfree(dmabuf); 4234 return -ENOMEM; 4235 } 4236 memset(dmabuf->virt, 0, dma_size); 4237 4238 /* 4239 * The SLI4 implementation of READ_REV conflicts at word1, 4240 * bits 31:16 and SLI4 adds vpd functionality not present 4241 * in SLI3. This code corrects the conflicts. 
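* Word1 is masked to its SLI4 meaning, and the VPD DMA address and available
* buffer length are programmed before the command is issued.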
4242 */ 4243 lpfc_read_rev(phba, mboxq); 4244 mqe = &mboxq->u.mqe; 4245 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys); 4246 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys); 4247 mqe->un.read_rev.word1 &= 0x0000FFFF; 4248 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1); 4249 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size); 4250 4251 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4252 if (rc) { 4253 dma_free_coherent(&phba->pcidev->dev, dma_size, 4254 dmabuf->virt, dmabuf->phys); 4255 kfree(dmabuf); 4256 return -EIO; 4257 } 4258 4259 /* 4260 * The available vpd length cannot be bigger than the 4261 * DMA buffer passed to the port. Catch the less than 4262 * case and update the caller's size. 4263 */ 4264 if (mqe->un.read_rev.avail_vpd_len < *vpd_size) 4265 *vpd_size = mqe->un.read_rev.avail_vpd_len; 4266 4267 memcpy(vpd, dmabuf->virt, *vpd_size); 4268 4269 dma_free_coherent(&phba->pcidev->dev, dma_size, 4270 dmabuf->virt, dmabuf->phys); 4271 kfree(dmabuf); 4272 return 0; 4273 } 4274 4275 /** 4276 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues 4277 * @phba: pointer to lpfc hba data structure. 4278 * 4279 * This routine is called to explicitly arm the SLI4 device's completion and 4280 * event queues 4281 **/ 4282 static void 4283 lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba) 4284 { 4285 uint8_t fcp_eqidx; 4286 4287 lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM); 4288 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM); 4289 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) 4290 lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx], 4291 LPFC_QUEUE_REARM); 4292 lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM); 4293 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) 4294 lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx], 4295 LPFC_QUEUE_REARM); 4296 } 4297 4298 /** 4299 * lpfc_sli4_hba_setup - SLI4 device intialization PCI function 4300 * @phba: Pointer to HBA context object. 4301 * 4302 * This function is the main SLI4 device intialization PCI function. This 4303 * function is called by the HBA intialization code, HBA reset code and 4304 * HBA error attention handler code. Caller is not required to hold any 4305 * locks. 4306 **/ 4307 int 4308 lpfc_sli4_hba_setup(struct lpfc_hba *phba) 4309 { 4310 int rc; 4311 LPFC_MBOXQ_t *mboxq; 4312 struct lpfc_mqe *mqe; 4313 uint8_t *vpd; 4314 uint32_t vpd_size; 4315 uint32_t ftr_rsp = 0; 4316 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport); 4317 struct lpfc_vport *vport = phba->pport; 4318 struct lpfc_dmabuf *mp; 4319 4320 /* Perform a PCI function reset to start from clean */ 4321 rc = lpfc_pci_function_reset(phba); 4322 if (unlikely(rc)) 4323 return -ENODEV; 4324 4325 /* Check the HBA Host Status Register for readyness */ 4326 rc = lpfc_sli4_post_status_check(phba); 4327 if (unlikely(rc)) 4328 return -ENODEV; 4329 else { 4330 spin_lock_irq(&phba->hbalock); 4331 phba->sli.sli_flag |= LPFC_SLI_ACTIVE; 4332 spin_unlock_irq(&phba->hbalock); 4333 } 4334 4335 /* 4336 * Allocate a single mailbox container for initializing the 4337 * port. 
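* The same mailbox buffer is reused for each of the initialization commands
* issued below.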
4338 */ 4339 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4340 if (!mboxq) 4341 return -ENOMEM; 4342 4343 /* 4344 * Continue initialization with default values even if driver failed 4345 * to read FCoE param config regions 4346 */ 4347 if (lpfc_sli4_read_fcoe_params(phba, mboxq)) 4348 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 4349 "2570 Failed to read FCoE parameters\n"); 4350 4351 /* Issue READ_REV to collect vpd and FW information. */ 4352 vpd_size = SLI4_PAGE_SIZE; 4353 vpd = kzalloc(vpd_size, GFP_KERNEL); 4354 if (!vpd) { 4355 rc = -ENOMEM; 4356 goto out_free_mbox; 4357 } 4358 4359 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size); 4360 if (unlikely(rc)) 4361 goto out_free_vpd; 4362 4363 mqe = &mboxq->u.mqe; 4364 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev); 4365 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) 4366 phba->hba_flag |= HBA_FCOE_SUPPORT; 4367 4368 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) == 4369 LPFC_DCBX_CEE_MODE) 4370 phba->hba_flag |= HBA_FIP_SUPPORT; 4371 else 4372 phba->hba_flag &= ~HBA_FIP_SUPPORT; 4373 4374 if (phba->sli_rev != LPFC_SLI_REV4 || 4375 !(phba->hba_flag & HBA_FCOE_SUPPORT)) { 4376 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 4377 "0376 READ_REV Error. SLI Level %d " 4378 "FCoE enabled %d\n", 4379 phba->sli_rev, phba->hba_flag & HBA_FCOE_SUPPORT); 4380 rc = -EIO; 4381 goto out_free_vpd; 4382 } 4383 /* 4384 * Evaluate the read rev and vpd data. Populate the driver 4385 * state with the results. If this routine fails, the failure 4386 * is not fatal as the driver will use generic values. 4387 */ 4388 rc = lpfc_parse_vpd(phba, vpd, vpd_size); 4389 if (unlikely(!rc)) { 4390 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 4391 "0377 Error %d parsing vpd. " 4392 "Using defaults.\n", rc); 4393 rc = 0; 4394 } 4395 4396 /* Save information as VPD data */ 4397 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev; 4398 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev; 4399 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev; 4400 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high, 4401 &mqe->un.read_rev); 4402 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low, 4403 &mqe->un.read_rev); 4404 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high, 4405 &mqe->un.read_rev); 4406 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low, 4407 &mqe->un.read_rev); 4408 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev; 4409 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16); 4410 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev; 4411 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16); 4412 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev; 4413 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16); 4414 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 4415 "(%d):0380 READ_REV Status x%x " 4416 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n", 4417 mboxq->vport ? mboxq->vport->vpi : 0, 4418 bf_get(lpfc_mqe_status, mqe), 4419 phba->vpd.rev.opFwName, 4420 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow, 4421 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow); 4422 4423 /* 4424 * Discover the port's supported feature set and match it against the 4425 * hosts requests. 
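* The request-features command (lpfc_request_features) is issued in polling
* mode; any feature the port does not grant bumps ftr_rsp, and the matching
* config options are turned off below.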
4426 */ 4427 lpfc_request_features(phba, mboxq); 4428 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4429 if (unlikely(rc)) { 4430 rc = -EIO; 4431 goto out_free_vpd; 4432 } 4433 4434 /* 4435 * The port must support FCP initiator mode as this is the 4436 * only mode running in the host. 4437 */ 4438 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) { 4439 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 4440 "0378 No support for fcpi mode.\n"); 4441 ftr_rsp++; 4442 } 4443 4444 /* 4445 * If the port cannot support the host's requested features 4446 * then turn off the global config parameters to disable the 4447 * feature in the driver. This is not a fatal error. 4448 */ 4449 if ((phba->cfg_enable_bg) && 4450 !(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) 4451 ftr_rsp++; 4452 4453 if (phba->max_vpi && phba->cfg_enable_npiv && 4454 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) 4455 ftr_rsp++; 4456 4457 if (ftr_rsp) { 4458 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 4459 "0379 Feature Mismatch Data: x%08x %08x " 4460 "x%x x%x x%x\n", mqe->un.req_ftrs.word2, 4461 mqe->un.req_ftrs.word3, phba->cfg_enable_bg, 4462 phba->cfg_enable_npiv, phba->max_vpi); 4463 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) 4464 phba->cfg_enable_bg = 0; 4465 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) 4466 phba->cfg_enable_npiv = 0; 4467 } 4468 4469 /* These SLI3 features are assumed in SLI4 */ 4470 spin_lock_irq(&phba->hbalock); 4471 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED); 4472 spin_unlock_irq(&phba->hbalock); 4473 4474 /* Read the port's service parameters. */ 4475 rc = lpfc_read_sparam(phba, mboxq, vport->vpi); 4476 if (rc) { 4477 phba->link_state = LPFC_HBA_ERROR; 4478 rc = -ENOMEM; 4479 goto out_free_vpd; 4480 } 4481 4482 mboxq->vport = vport; 4483 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4484 mp = (struct lpfc_dmabuf *) mboxq->context1; 4485 if (rc == MBX_SUCCESS) { 4486 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm)); 4487 rc = 0; 4488 } 4489 4490 /* 4491 * This memory was allocated by the lpfc_read_sparam routine. Release 4492 * it to the mbuf pool. 4493 */ 4494 lpfc_mbuf_free(phba, mp->virt, mp->phys); 4495 kfree(mp); 4496 mboxq->context1 = NULL; 4497 if (unlikely(rc)) { 4498 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 4499 "0382 READ_SPARAM command failed " 4500 "status %d, mbxStatus x%x\n", 4501 rc, bf_get(lpfc_mqe_status, mqe)); 4502 phba->link_state = LPFC_HBA_ERROR; 4503 rc = -EIO; 4504 goto out_free_vpd; 4505 } 4506 4507 if (phba->cfg_soft_wwnn) 4508 u64_to_wwn(phba->cfg_soft_wwnn, 4509 vport->fc_sparam.nodeName.u.wwn); 4510 if (phba->cfg_soft_wwpn) 4511 u64_to_wwn(phba->cfg_soft_wwpn, 4512 vport->fc_sparam.portName.u.wwn); 4513 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName, 4514 sizeof(struct lpfc_name)); 4515 memcpy(&vport->fc_portname, &vport->fc_sparam.portName, 4516 sizeof(struct lpfc_name)); 4517 4518 /* Update the fc_host data structures with new wwn. 
*/ 4519 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 4520 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 4521 4522 /* Register SGL pool to the device using non-embedded mailbox command */ 4523 rc = lpfc_sli4_post_sgl_list(phba); 4524 if (unlikely(rc)) { 4525 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 4526 "0582 Error %d during sgl post operation\n", 4527 rc); 4528 rc = -ENODEV; 4529 goto out_free_vpd; 4530 } 4531 4532 /* Register SCSI SGL pool to the device */ 4533 rc = lpfc_sli4_repost_scsi_sgl_list(phba); 4534 if (unlikely(rc)) { 4535 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 4536 "0383 Error %d during scsi sgl post " 4537 "operation\n", rc); 4538 /* Some Scsi buffers were moved to the abort scsi list */ 4539 /* A pci function reset will repost them */ 4540 rc = -ENODEV; 4541 goto out_free_vpd; 4542 } 4543 4544 /* Post the rpi header region to the device. */ 4545 rc = lpfc_sli4_post_all_rpi_hdrs(phba); 4546 if (unlikely(rc)) { 4547 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 4548 "0393 Error %d during rpi post operation\n", 4549 rc); 4550 rc = -ENODEV; 4551 goto out_free_vpd; 4552 } 4553 4554 /* Set up all the queues to the device */ 4555 rc = lpfc_sli4_queue_setup(phba); 4556 if (unlikely(rc)) { 4557 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 4558 "0381 Error %d during queue setup.\n ", rc); 4559 goto out_stop_timers; 4560 } 4561 4562 /* Arm the CQs and then EQs on device */ 4563 lpfc_sli4_arm_cqeq_intr(phba); 4564 4565 /* Indicate device interrupt mode */ 4566 phba->sli4_hba.intr_enable = 1; 4567 4568 /* Allow asynchronous mailbox command to go through */ 4569 spin_lock_irq(&phba->hbalock); 4570 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 4571 spin_unlock_irq(&phba->hbalock); 4572 4573 /* Post receive buffers to the device */ 4574 lpfc_sli4_rb_setup(phba); 4575 4576 /* Reset HBA FCF states after HBA reset */ 4577 phba->fcf.fcf_flag = 0; 4578 phba->fcf.current_rec.flag = 0; 4579 4580 /* Start the ELS watchdog timer */ 4581 mod_timer(&vport->els_tmofunc, 4582 jiffies + HZ * (phba->fc_ratov * 2)); 4583 4584 /* Start heart beat timer */ 4585 mod_timer(&phba->hb_tmofunc, 4586 jiffies + HZ * LPFC_HB_MBOX_INTERVAL); 4587 phba->hb_outstanding = 0; 4588 phba->last_completion_time = jiffies; 4589 4590 /* Start error attention (ERATT) polling timer */ 4591 mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL); 4592 4593 /* Enable PCIe device Advanced Error Reporting (AER) if configured */ 4594 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) { 4595 rc = pci_enable_pcie_error_reporting(phba->pcidev); 4596 if (!rc) { 4597 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4598 "2829 This device supports " 4599 "Advanced Error Reporting (AER)\n"); 4600 spin_lock_irq(&phba->hbalock); 4601 phba->hba_flag |= HBA_AER_ENABLED; 4602 spin_unlock_irq(&phba->hbalock); 4603 } else { 4604 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4605 "2830 This device does not support " 4606 "Advanced Error Reporting (AER)\n"); 4607 phba->cfg_aer_support = 0; 4608 } 4609 } 4610 4611 /* 4612 * The port is ready, set the host's link state to LINK_DOWN 4613 * in preparation for link interrupts. 
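* INIT_LINK is issued below in no-wait mode with lpfc_sli_def_mbox_cmpl as
* its completion handler.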
4614 */ 4615 lpfc_init_link(phba, mboxq, phba->cfg_topology, phba->cfg_link_speed); 4616 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 4617 lpfc_set_loopback_flag(phba); 4618 /* Change driver state to LPFC_LINK_DOWN right before init link */ 4619 spin_lock_irq(&phba->hbalock); 4620 phba->link_state = LPFC_LINK_DOWN; 4621 spin_unlock_irq(&phba->hbalock); 4622 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 4623 if (unlikely(rc != MBX_NOT_FINISHED)) { 4624 kfree(vpd); 4625 return 0; 4626 } else 4627 rc = -EIO; 4628 4629 /* Unset all the queues set up in this routine when error out */ 4630 if (rc) 4631 lpfc_sli4_queue_unset(phba); 4632 4633 out_stop_timers: 4634 if (rc) 4635 lpfc_stop_hba_timers(phba); 4636 out_free_vpd: 4637 kfree(vpd); 4638 out_free_mbox: 4639 mempool_free(mboxq, phba->mbox_mem_pool); 4640 return rc; 4641 } 4642 4643 /** 4644 * lpfc_mbox_timeout - Timeout call back function for mbox timer 4645 * @ptr: context object - pointer to hba structure. 4646 * 4647 * This is the callback function for mailbox timer. The mailbox 4648 * timer is armed when a new mailbox command is issued and the timer 4649 * is deleted when the mailbox complete. The function is called by 4650 * the kernel timer code when a mailbox does not complete within 4651 * expected time. This function wakes up the worker thread to 4652 * process the mailbox timeout and returns. All the processing is 4653 * done by the worker thread function lpfc_mbox_timeout_handler. 4654 **/ 4655 void 4656 lpfc_mbox_timeout(unsigned long ptr) 4657 { 4658 struct lpfc_hba *phba = (struct lpfc_hba *) ptr; 4659 unsigned long iflag; 4660 uint32_t tmo_posted; 4661 4662 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 4663 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO; 4664 if (!tmo_posted) 4665 phba->pport->work_port_events |= WORKER_MBOX_TMO; 4666 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 4667 4668 if (!tmo_posted) 4669 lpfc_worker_wake_up(phba); 4670 return; 4671 } 4672 4673 4674 /** 4675 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout 4676 * @phba: Pointer to HBA context object. 4677 * 4678 * This function is called from worker thread when a mailbox command times out. 4679 * The caller is not required to hold any locks. This function will reset the 4680 * HBA and recover all the pending commands. 4681 **/ 4682 void 4683 lpfc_mbox_timeout_handler(struct lpfc_hba *phba) 4684 { 4685 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active; 4686 MAILBOX_t *mb = &pmbox->u.mb; 4687 struct lpfc_sli *psli = &phba->sli; 4688 struct lpfc_sli_ring *pring; 4689 4690 /* Check the pmbox pointer first. There is a race condition 4691 * between the mbox timeout handler getting executed in the 4692 * worklist and the mailbox actually completing. When this 4693 * race condition occurs, the mbox_active will be NULL. 
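* The check below is made under the hbalock, so the handler simply logs and
* exits when the mailbox has already completed.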
4694 */ 4695 spin_lock_irq(&phba->hbalock); 4696 if (pmbox == NULL) { 4697 lpfc_printf_log(phba, KERN_WARNING, 4698 LOG_MBOX | LOG_SLI, 4699 "0353 Active Mailbox cleared - mailbox timeout " 4700 "exiting\n"); 4701 spin_unlock_irq(&phba->hbalock); 4702 return; 4703 } 4704 4705 /* Mbox cmd <mbxCommand> timeout */ 4706 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 4707 "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n", 4708 mb->mbxCommand, 4709 phba->pport->port_state, 4710 phba->sli.sli_flag, 4711 phba->sli.mbox_active); 4712 spin_unlock_irq(&phba->hbalock); 4713 4714 /* Setting state unknown so lpfc_sli_abort_iocb_ring 4715 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing 4716 * it to fail all outstanding SCSI IO. 4717 */ 4718 spin_lock_irq(&phba->pport->work_port_lock); 4719 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 4720 spin_unlock_irq(&phba->pport->work_port_lock); 4721 spin_lock_irq(&phba->hbalock); 4722 phba->link_state = LPFC_LINK_UNKNOWN; 4723 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 4724 spin_unlock_irq(&phba->hbalock); 4725 4726 pring = &psli->ring[psli->fcp_ring]; 4727 lpfc_sli_abort_iocb_ring(phba, pring); 4728 4729 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 4730 "0345 Resetting board due to mailbox timeout\n"); 4731 4732 /* Reset the HBA device */ 4733 lpfc_reset_hba(phba); 4734 } 4735 4736 /** 4737 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware 4738 * @phba: Pointer to HBA context object. 4739 * @pmbox: Pointer to mailbox object. 4740 * @flag: Flag indicating how the mailbox needs to be processed. 4741 * 4742 * This function is called by discovery code and HBA management code 4743 * to submit a mailbox command to firmware with SLI-3 interface spec. This 4744 * function gets the hbalock to protect the data structures. 4745 * The mailbox command can be submitted in polling mode, in which case 4746 * this function will wait in a polling loop for the completion of the 4747 * mailbox. 4748 * If the mailbox is submitted in no_wait mode (not polling) the 4749 * function will submit the command and return immediately without waiting 4750 * for the mailbox completion. The no_wait mode is supported only when the HBA 4751 * is in SLI2/SLI3 mode - interrupts are enabled. 4752 * The SLI interface allows only one mailbox pending at a time. If the 4753 * mailbox is issued in polling mode and there is already a mailbox 4754 * pending, then the function will return an error. If the mailbox is issued 4755 * in NO_WAIT mode and there is a mailbox pending already, the function 4756 * will return MBX_BUSY after queuing the mailbox into the mailbox queue. 4757 * The sli layer owns the mailbox object until the completion of the mailbox 4758 * command if this function returns MBX_BUSY or MBX_SUCCESS. For all other 4759 * return codes the caller owns the mailbox command after the return of 4760 * the function.
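*
* A typical polling-mode call, as used by lpfc_sli_hbq_setup() earlier in
* this file, looks like:
*	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
*		mempool_free(pmb, phba->mbox_mem_pool);
*		return -ENXIO;
*	}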
4761 **/ 4762 static int 4763 lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, 4764 uint32_t flag) 4765 { 4766 MAILBOX_t *mb; 4767 struct lpfc_sli *psli = &phba->sli; 4768 uint32_t status, evtctr; 4769 uint32_t ha_copy; 4770 int i; 4771 unsigned long timeout; 4772 unsigned long drvr_flag = 0; 4773 uint32_t word0, ldata; 4774 void __iomem *to_slim; 4775 int processing_queue = 0; 4776 4777 spin_lock_irqsave(&phba->hbalock, drvr_flag); 4778 if (!pmbox) { 4779 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 4780 /* processing mbox queue from intr_handler */ 4781 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { 4782 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 4783 return MBX_SUCCESS; 4784 } 4785 processing_queue = 1; 4786 pmbox = lpfc_mbox_get(phba); 4787 if (!pmbox) { 4788 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 4789 return MBX_SUCCESS; 4790 } 4791 } 4792 4793 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl && 4794 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) { 4795 if(!pmbox->vport) { 4796 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 4797 lpfc_printf_log(phba, KERN_ERR, 4798 LOG_MBOX | LOG_VPORT, 4799 "1806 Mbox x%x failed. No vport\n", 4800 pmbox->u.mb.mbxCommand); 4801 dump_stack(); 4802 goto out_not_finished; 4803 } 4804 } 4805 4806 /* If the PCI channel is in offline state, do not post mbox. */ 4807 if (unlikely(pci_channel_offline(phba->pcidev))) { 4808 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 4809 goto out_not_finished; 4810 } 4811 4812 /* If HBA has a deferred error attention, fail the iocb. */ 4813 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 4814 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 4815 goto out_not_finished; 4816 } 4817 4818 psli = &phba->sli; 4819 4820 mb = &pmbox->u.mb; 4821 status = MBX_SUCCESS; 4822 4823 if (phba->link_state == LPFC_HBA_ERROR) { 4824 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 4825 4826 /* Mbox command <mbxCommand> cannot issue */ 4827 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 4828 "(%d):0311 Mailbox command x%x cannot " 4829 "issue Data: x%x x%x\n", 4830 pmbox->vport ? pmbox->vport->vpi : 0, 4831 pmbox->u.mb.mbxCommand, psli->sli_flag, flag); 4832 goto out_not_finished; 4833 } 4834 4835 if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT && 4836 !(readl(phba->HCregaddr) & HC_MBINT_ENA)) { 4837 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 4838 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 4839 "(%d):2528 Mailbox command x%x cannot " 4840 "issue Data: x%x x%x\n", 4841 pmbox->vport ? pmbox->vport->vpi : 0, 4842 pmbox->u.mb.mbxCommand, psli->sli_flag, flag); 4843 goto out_not_finished; 4844 } 4845 4846 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 4847 /* Polling for a mbox command when another one is already active 4848 * is not allowed in SLI. Also, the driver must have established 4849 * SLI2 mode to queue and process multiple mbox commands. 4850 */ 4851 4852 if (flag & MBX_POLL) { 4853 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 4854 4855 /* Mbox command <mbxCommand> cannot issue */ 4856 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 4857 "(%d):2529 Mailbox command x%x " 4858 "cannot issue Data: x%x x%x\n", 4859 pmbox->vport ? 
pmbox->vport->vpi : 0, 4860 pmbox->u.mb.mbxCommand, 4861 psli->sli_flag, flag); 4862 goto out_not_finished; 4863 } 4864 4865 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) { 4866 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 4867 /* Mbox command <mbxCommand> cannot issue */ 4868 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 4869 "(%d):2530 Mailbox command x%x " 4870 "cannot issue Data: x%x x%x\n", 4871 pmbox->vport ? pmbox->vport->vpi : 0, 4872 pmbox->u.mb.mbxCommand, 4873 psli->sli_flag, flag); 4874 goto out_not_finished; 4875 } 4876 4877 /* Another mailbox command is still being processed, queue this 4878 * command to be processed later. 4879 */ 4880 lpfc_mbox_put(phba, pmbox); 4881 4882 /* Mbox cmd issue - BUSY */ 4883 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 4884 "(%d):0308 Mbox cmd issue - BUSY Data: " 4885 "x%x x%x x%x x%x\n", 4886 pmbox->vport ? pmbox->vport->vpi : 0xffffff, 4887 mb->mbxCommand, phba->pport->port_state, 4888 psli->sli_flag, flag); 4889 4890 psli->slistat.mbox_busy++; 4891 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 4892 4893 if (pmbox->vport) { 4894 lpfc_debugfs_disc_trc(pmbox->vport, 4895 LPFC_DISC_TRC_MBOX_VPORT, 4896 "MBOX Bsy vport: cmd:x%x mb:x%x x%x", 4897 (uint32_t)mb->mbxCommand, 4898 mb->un.varWords[0], mb->un.varWords[1]); 4899 } 4900 else { 4901 lpfc_debugfs_disc_trc(phba->pport, 4902 LPFC_DISC_TRC_MBOX, 4903 "MBOX Bsy: cmd:x%x mb:x%x x%x", 4904 (uint32_t)mb->mbxCommand, 4905 mb->un.varWords[0], mb->un.varWords[1]); 4906 } 4907 4908 return MBX_BUSY; 4909 } 4910 4911 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 4912 4913 /* If we are not polling, we MUST be in SLI2 mode */ 4914 if (flag != MBX_POLL) { 4915 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) && 4916 (mb->mbxCommand != MBX_KILL_BOARD)) { 4917 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 4918 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 4919 /* Mbox command <mbxCommand> cannot issue */ 4920 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 4921 "(%d):2531 Mailbox command x%x " 4922 "cannot issue Data: x%x x%x\n", 4923 pmbox->vport ? pmbox->vport->vpi : 0, 4924 pmbox->u.mb.mbxCommand, 4925 psli->sli_flag, flag); 4926 goto out_not_finished; 4927 } 4928 /* timeout active mbox command */ 4929 mod_timer(&psli->mbox_tmo, (jiffies + 4930 (HZ * lpfc_mbox_tmo_val(phba, mb->mbxCommand)))); 4931 } 4932 4933 /* Mailbox cmd <cmd> issue */ 4934 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 4935 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x " 4936 "x%x\n", 4937 pmbox->vport ? pmbox->vport->vpi : 0, 4938 mb->mbxCommand, phba->pport->port_state, 4939 psli->sli_flag, flag); 4940 4941 if (mb->mbxCommand != MBX_HEARTBEAT) { 4942 if (pmbox->vport) { 4943 lpfc_debugfs_disc_trc(pmbox->vport, 4944 LPFC_DISC_TRC_MBOX_VPORT, 4945 "MBOX Send vport: cmd:x%x mb:x%x x%x", 4946 (uint32_t)mb->mbxCommand, 4947 mb->un.varWords[0], mb->un.varWords[1]); 4948 } 4949 else { 4950 lpfc_debugfs_disc_trc(phba->pport, 4951 LPFC_DISC_TRC_MBOX, 4952 "MBOX Send: cmd:x%x mb:x%x x%x", 4953 (uint32_t)mb->mbxCommand, 4954 mb->un.varWords[0], mb->un.varWords[1]); 4955 } 4956 } 4957 4958 psli->slistat.mbox_cmd++; 4959 evtctr = psli->slistat.mbox_event; 4960 4961 /* next set own bit for the adapter and copy over command word */ 4962 mb->mbxOwner = OWN_CHIP; 4963 4964 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 4965 /* Populate mbox extension offset word. 
*/ 4966 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) { 4967 *(((uint32_t *)mb) + pmbox->mbox_offset_word) 4968 = (uint8_t *)phba->mbox_ext 4969 - (uint8_t *)phba->mbox; 4970 } 4971 4972 /* Copy the mailbox extension data */ 4973 if (pmbox->in_ext_byte_len && pmbox->context2) { 4974 lpfc_sli_pcimem_bcopy(pmbox->context2, 4975 (uint8_t *)phba->mbox_ext, 4976 pmbox->in_ext_byte_len); 4977 } 4978 /* Copy command data to host SLIM area */ 4979 lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE); 4980 } else { 4981 /* Populate mbox extension offset word. */ 4982 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) 4983 *(((uint32_t *)mb) + pmbox->mbox_offset_word) 4984 = MAILBOX_HBA_EXT_OFFSET; 4985 4986 /* Copy the mailbox extension data */ 4987 if (pmbox->in_ext_byte_len && pmbox->context2) { 4988 lpfc_memcpy_to_slim(phba->MBslimaddr + 4989 MAILBOX_HBA_EXT_OFFSET, 4990 pmbox->context2, pmbox->in_ext_byte_len); 4991 4992 } 4993 if (mb->mbxCommand == MBX_CONFIG_PORT) { 4994 /* copy command data into host mbox for cmpl */ 4995 lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE); 4996 } 4997 4998 /* First copy mbox command data to HBA SLIM, skip past first 4999 word */ 5000 to_slim = phba->MBslimaddr + sizeof (uint32_t); 5001 lpfc_memcpy_to_slim(to_slim, &mb->un.varWords[0], 5002 MAILBOX_CMD_SIZE - sizeof (uint32_t)); 5003 5004 /* Next copy over first word, with mbxOwner set */ 5005 ldata = *((uint32_t *)mb); 5006 to_slim = phba->MBslimaddr; 5007 writel(ldata, to_slim); 5008 readl(to_slim); /* flush */ 5009 5010 if (mb->mbxCommand == MBX_CONFIG_PORT) { 5011 /* switch over to host mailbox */ 5012 psli->sli_flag |= LPFC_SLI_ACTIVE; 5013 } 5014 } 5015 5016 wmb(); 5017 5018 switch (flag) { 5019 case MBX_NOWAIT: 5020 /* Set up reference to mailbox command */ 5021 psli->mbox_active = pmbox; 5022 /* Interrupt board to do it */ 5023 writel(CA_MBATT, phba->CAregaddr); 5024 readl(phba->CAregaddr); /* flush */ 5025 /* Don't wait for it to finish, just return */ 5026 break; 5027 5028 case MBX_POLL: 5029 /* Set up null reference to mailbox command */ 5030 psli->mbox_active = NULL; 5031 /* Interrupt board to do it */ 5032 writel(CA_MBATT, phba->CAregaddr); 5033 readl(phba->CAregaddr); /* flush */ 5034 5035 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 5036 /* First read mbox status word */ 5037 word0 = *((uint32_t *)phba->mbox); 5038 word0 = le32_to_cpu(word0); 5039 } else { 5040 /* First read mbox status word */ 5041 word0 = readl(phba->MBslimaddr); 5042 } 5043 5044 /* Read the HBA Host Attention Register */ 5045 ha_copy = readl(phba->HAregaddr); 5046 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, 5047 mb->mbxCommand) * 5048 1000) + jiffies; 5049 i = 0; 5050 /* Wait for command to complete */ 5051 while (((word0 & OWN_CHIP) == OWN_CHIP) || 5052 (!(ha_copy & HA_MBATT) && 5053 (phba->link_state > LPFC_WARM_START))) { 5054 if (time_after(jiffies, timeout)) { 5055 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 5056 spin_unlock_irqrestore(&phba->hbalock, 5057 drvr_flag); 5058 goto out_not_finished; 5059 } 5060 5061 /* Check if we took a mbox interrupt while we were 5062 polling */ 5063 if (((word0 & OWN_CHIP) != OWN_CHIP) 5064 && (evtctr != psli->slistat.mbox_event)) 5065 break; 5066 5067 if (i++ > 10) { 5068 spin_unlock_irqrestore(&phba->hbalock, 5069 drvr_flag); 5070 msleep(1); 5071 spin_lock_irqsave(&phba->hbalock, drvr_flag); 5072 } 5073 5074 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 5075 /* First copy command data */ 5076 word0 = *((uint32_t *)phba->mbox); 5077 word0 = le32_to_cpu(word0); 5078 if 
(mb->mbxCommand == MBX_CONFIG_PORT) { 5079 MAILBOX_t *slimmb; 5080 uint32_t slimword0; 5081 /* Check real SLIM for any errors */ 5082 slimword0 = readl(phba->MBslimaddr); 5083 slimmb = (MAILBOX_t *) & slimword0; 5084 if (((slimword0 & OWN_CHIP) != OWN_CHIP) 5085 && slimmb->mbxStatus) { 5086 psli->sli_flag &= 5087 ~LPFC_SLI_ACTIVE; 5088 word0 = slimword0; 5089 } 5090 } 5091 } else { 5092 /* First copy command data */ 5093 word0 = readl(phba->MBslimaddr); 5094 } 5095 /* Read the HBA Host Attention Register */ 5096 ha_copy = readl(phba->HAregaddr); 5097 } 5098 5099 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 5100 /* copy results back to user */ 5101 lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE); 5102 /* Copy the mailbox extension data */ 5103 if (pmbox->out_ext_byte_len && pmbox->context2) { 5104 lpfc_sli_pcimem_bcopy(phba->mbox_ext, 5105 pmbox->context2, 5106 pmbox->out_ext_byte_len); 5107 } 5108 } else { 5109 /* First copy command data */ 5110 lpfc_memcpy_from_slim(mb, phba->MBslimaddr, 5111 MAILBOX_CMD_SIZE); 5112 /* Copy the mailbox extension data */ 5113 if (pmbox->out_ext_byte_len && pmbox->context2) { 5114 lpfc_memcpy_from_slim(pmbox->context2, 5115 phba->MBslimaddr + 5116 MAILBOX_HBA_EXT_OFFSET, 5117 pmbox->out_ext_byte_len); 5118 } 5119 } 5120 5121 writel(HA_MBATT, phba->HAregaddr); 5122 readl(phba->HAregaddr); /* flush */ 5123 5124 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 5125 status = mb->mbxStatus; 5126 } 5127 5128 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 5129 return status; 5130 5131 out_not_finished: 5132 if (processing_queue) { 5133 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED; 5134 lpfc_mbox_cmpl_put(phba, pmbox); 5135 } 5136 return MBX_NOT_FINISHED; 5137 } 5138 5139 /** 5140 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command 5141 * @phba: Pointer to HBA context object. 5142 * 5143 * The function blocks the posting of SLI4 asynchronous mailbox commands from 5144 * the driver internal pending mailbox queue. It will then try to wait out the 5145 * possible outstanding mailbox command before return. 5146 * 5147 * Returns: 5148 * 0 - the outstanding mailbox command completed; otherwise, the wait for 5149 * the outstanding mailbox command timed out. 5150 **/ 5151 static int 5152 lpfc_sli4_async_mbox_block(struct lpfc_hba *phba) 5153 { 5154 struct lpfc_sli *psli = &phba->sli; 5155 uint8_t actcmd = MBX_HEARTBEAT; 5156 int rc = 0; 5157 unsigned long timeout; 5158 5159 /* Mark the asynchronous mailbox command posting as blocked */ 5160 spin_lock_irq(&phba->hbalock); 5161 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 5162 if (phba->sli.mbox_active) 5163 actcmd = phba->sli.mbox_active->u.mb.mbxCommand; 5164 spin_unlock_irq(&phba->hbalock); 5165 /* Determine how long we might wait for the active mailbox 5166 * command to be gracefully completed by firmware. 
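* The wait limit comes from lpfc_mbox_tmo_val() for the command that is
* currently active, or for MBX_HEARTBEAT when no command is active.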
5167 */ 5168 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) * 1000) + 5169 jiffies; 5170 /* Wait for the outstanding mailbox command to complete */ 5171 while (phba->sli.mbox_active) { 5172 /* Check active mailbox complete status every 2ms */ 5173 msleep(2); 5174 if (time_after(jiffies, timeout)) { 5175 /* Timeout, mark the outstanding cmd as not complete */ 5176 rc = 1; 5177 break; 5178 } 5179 } 5180 5181 /* Cannot cleanly block async mailbox command posting, fail it */ 5182 if (rc) { 5183 spin_lock_irq(&phba->hbalock); 5184 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 5185 spin_unlock_irq(&phba->hbalock); 5186 } 5187 return rc; 5188 } 5189 5190 /** 5191 * lpfc_sli4_async_mbox_unblock - Unblock posting of SLI4 async mailbox commands 5192 * @phba: Pointer to HBA context object. 5193 * 5194 * The function unblocks and resumes posting of SLI4 asynchronous mailbox 5195 * commands from the driver internal pending mailbox queue. It makes sure 5196 * that there is no outstanding mailbox command before resuming posting 5197 * asynchronous mailbox commands. If, for any reason, there is an outstanding 5198 * mailbox command, it will try to wait it out before resuming asynchronous 5199 * mailbox command posting. 5200 **/ 5201 static void 5202 lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba) 5203 { 5204 struct lpfc_sli *psli = &phba->sli; 5205 5206 spin_lock_irq(&phba->hbalock); 5207 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { 5208 /* Asynchronous mailbox posting is not blocked, do nothing */ 5209 spin_unlock_irq(&phba->hbalock); 5210 return; 5211 } 5212 5213 /* The outstanding synchronous mailbox command is guaranteed to be done, 5214 * either successfully or by timeout; after a timeout the outstanding 5215 * command is always removed. So just unblock posting of async 5216 * mailbox commands and resume. 5217 */ 5218 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 5219 spin_unlock_irq(&phba->hbalock); 5220 5221 /* wake up worker thread to post asynchronous mailbox commands */ 5222 lpfc_worker_wake_up(phba); 5223 } 5224 5225 /** 5226 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox 5227 * @phba: Pointer to HBA context object. 5228 * @mboxq: Pointer to mailbox object. 5229 * 5230 * The function posts a mailbox to the port. The mailbox is expected 5231 * to be completely filled in and ready for the port to operate on it. 5232 * This routine executes a synchronous completion operation on the 5233 * mailbox by polling for its completion. 5234 * 5235 * The caller must not be holding any locks when calling this routine. 5236 * 5237 * Returns: 5238 * MBX_SUCCESS - mailbox posted successfully 5239 * Any of the MBX error values. 5240 **/ 5241 static int 5242 lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 5243 { 5244 int rc = MBX_SUCCESS; 5245 unsigned long iflag; 5246 uint32_t db_ready; 5247 uint32_t mcqe_status; 5248 uint32_t mbx_cmnd; 5249 unsigned long timeout; 5250 struct lpfc_sli *psli = &phba->sli; 5251 struct lpfc_mqe *mb = &mboxq->u.mqe; 5252 struct lpfc_bmbx_create *mbox_rgn; 5253 struct dma_address *dma_address; 5254 struct lpfc_register bmbx_reg; 5255 5256 /* 5257 * Only one mailbox can be active to the bootstrap mailbox region 5258 * at a time and there is no queueing provided.
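* If another mailbox command already holds the token, the routine fails
* immediately with MBXERR_ERROR instead of queueing the request.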
5259 */ 5260 spin_lock_irqsave(&phba->hbalock, iflag); 5261 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 5262 spin_unlock_irqrestore(&phba->hbalock, iflag); 5263 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 5264 "(%d):2532 Mailbox command x%x (x%x) " 5265 "cannot issue Data: x%x x%x\n", 5266 mboxq->vport ? mboxq->vport->vpi : 0, 5267 mboxq->u.mb.mbxCommand, 5268 lpfc_sli4_mbox_opcode_get(phba, mboxq), 5269 psli->sli_flag, MBX_POLL); 5270 return MBXERR_ERROR; 5271 } 5272 /* The server grabs the token and owns it until release */ 5273 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 5274 phba->sli.mbox_active = mboxq; 5275 spin_unlock_irqrestore(&phba->hbalock, iflag); 5276 5277 /* 5278 * Initialize the bootstrap memory region to avoid stale data areas 5279 * in the mailbox post. Then copy the caller's mailbox contents to 5280 * the bmbx mailbox region. 5281 */ 5282 mbx_cmnd = bf_get(lpfc_mqe_command, mb); 5283 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create)); 5284 lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt, 5285 sizeof(struct lpfc_mqe)); 5286 5287 /* Post the high mailbox dma address to the port and wait for ready. */ 5288 dma_address = &phba->sli4_hba.bmbx.dma_address; 5289 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr); 5290 5291 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd) 5292 * 1000) + jiffies; 5293 do { 5294 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr); 5295 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg); 5296 if (!db_ready) 5297 msleep(2); 5298 5299 if (time_after(jiffies, timeout)) { 5300 rc = MBXERR_ERROR; 5301 goto exit; 5302 } 5303 } while (!db_ready); 5304 5305 /* Post the low mailbox dma address to the port. */ 5306 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr); 5307 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd) 5308 * 1000) + jiffies; 5309 do { 5310 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr); 5311 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg); 5312 if (!db_ready) 5313 msleep(2); 5314 5315 if (time_after(jiffies, timeout)) { 5316 rc = MBXERR_ERROR; 5317 goto exit; 5318 } 5319 } while (!db_ready); 5320 5321 /* 5322 * Read the CQ to ensure the mailbox has completed. 5323 * If so, update the mailbox status so that the upper layers 5324 * can complete the request normally. 5325 */ 5326 lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb, 5327 sizeof(struct lpfc_mqe)); 5328 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt; 5329 lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe, 5330 sizeof(struct lpfc_mcqe)); 5331 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe); 5332 5333 /* Prefix the mailbox status with range x4000 to note SLI4 status. */ 5334 if (mcqe_status != MB_CQE_STATUS_SUCCESS) { 5335 bf_set(lpfc_mqe_status, mb, LPFC_MBX_ERROR_RANGE | mcqe_status); 5336 rc = MBXERR_ERROR; 5337 } else 5338 lpfc_sli4_swap_str(phba, mboxq); 5339 5340 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 5341 "(%d):0356 Mailbox cmd x%x (x%x) Status x%x " 5342 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x" 5343 " x%x x%x CQ: x%x x%x x%x x%x\n", 5344 mboxq->vport ? 
mboxq->vport->vpi : 0, 5345 mbx_cmnd, lpfc_sli4_mbox_opcode_get(phba, mboxq), 5346 bf_get(lpfc_mqe_status, mb), 5347 mb->un.mb_words[0], mb->un.mb_words[1], 5348 mb->un.mb_words[2], mb->un.mb_words[3], 5349 mb->un.mb_words[4], mb->un.mb_words[5], 5350 mb->un.mb_words[6], mb->un.mb_words[7], 5351 mb->un.mb_words[8], mb->un.mb_words[9], 5352 mb->un.mb_words[10], mb->un.mb_words[11], 5353 mb->un.mb_words[12], mboxq->mcqe.word0, 5354 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, 5355 mboxq->mcqe.trailer); 5356 exit: 5357 /* We are holding the token, no needed for lock when release */ 5358 spin_lock_irqsave(&phba->hbalock, iflag); 5359 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 5360 phba->sli.mbox_active = NULL; 5361 spin_unlock_irqrestore(&phba->hbalock, iflag); 5362 return rc; 5363 } 5364 5365 /** 5366 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware 5367 * @phba: Pointer to HBA context object. 5368 * @pmbox: Pointer to mailbox object. 5369 * @flag: Flag indicating how the mailbox need to be processed. 5370 * 5371 * This function is called by discovery code and HBA management code to submit 5372 * a mailbox command to firmware with SLI-4 interface spec. 5373 * 5374 * Return codes the caller owns the mailbox command after the return of the 5375 * function. 5376 **/ 5377 static int 5378 lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, 5379 uint32_t flag) 5380 { 5381 struct lpfc_sli *psli = &phba->sli; 5382 unsigned long iflags; 5383 int rc; 5384 5385 rc = lpfc_mbox_dev_check(phba); 5386 if (unlikely(rc)) { 5387 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 5388 "(%d):2544 Mailbox command x%x (x%x) " 5389 "cannot issue Data: x%x x%x\n", 5390 mboxq->vport ? mboxq->vport->vpi : 0, 5391 mboxq->u.mb.mbxCommand, 5392 lpfc_sli4_mbox_opcode_get(phba, mboxq), 5393 psli->sli_flag, flag); 5394 goto out_not_finished; 5395 } 5396 5397 /* Detect polling mode and jump to a handler */ 5398 if (!phba->sli4_hba.intr_enable) { 5399 if (flag == MBX_POLL) 5400 rc = lpfc_sli4_post_sync_mbox(phba, mboxq); 5401 else 5402 rc = -EIO; 5403 if (rc != MBX_SUCCESS) 5404 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 5405 "(%d):2541 Mailbox command x%x " 5406 "(x%x) cannot issue Data: x%x x%x\n", 5407 mboxq->vport ? mboxq->vport->vpi : 0, 5408 mboxq->u.mb.mbxCommand, 5409 lpfc_sli4_mbox_opcode_get(phba, mboxq), 5410 psli->sli_flag, flag); 5411 return rc; 5412 } else if (flag == MBX_POLL) { 5413 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 5414 "(%d):2542 Try to issue mailbox command " 5415 "x%x (x%x) synchronously ahead of async" 5416 "mailbox command queue: x%x x%x\n", 5417 mboxq->vport ? mboxq->vport->vpi : 0, 5418 mboxq->u.mb.mbxCommand, 5419 lpfc_sli4_mbox_opcode_get(phba, mboxq), 5420 psli->sli_flag, flag); 5421 /* Try to block the asynchronous mailbox posting */ 5422 rc = lpfc_sli4_async_mbox_block(phba); 5423 if (!rc) { 5424 /* Successfully blocked, now issue sync mbox cmd */ 5425 rc = lpfc_sli4_post_sync_mbox(phba, mboxq); 5426 if (rc != MBX_SUCCESS) 5427 lpfc_printf_log(phba, KERN_ERR, 5428 LOG_MBOX | LOG_SLI, 5429 "(%d):2597 Mailbox command " 5430 "x%x (x%x) cannot issue " 5431 "Data: x%x x%x\n", 5432 mboxq->vport ? 
5433 mboxq->vport->vpi : 0, 5434 mboxq->u.mb.mbxCommand, 5435 lpfc_sli4_mbox_opcode_get(phba, 5436 mboxq), 5437 psli->sli_flag, flag); 5438 /* Unblock the async mailbox posting afterward */ 5439 lpfc_sli4_async_mbox_unblock(phba); 5440 } 5441 return rc; 5442 } 5443 5444 /* Now, interrupt mode asynchrous mailbox command */ 5445 rc = lpfc_mbox_cmd_check(phba, mboxq); 5446 if (rc) { 5447 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 5448 "(%d):2543 Mailbox command x%x (x%x) " 5449 "cannot issue Data: x%x x%x\n", 5450 mboxq->vport ? mboxq->vport->vpi : 0, 5451 mboxq->u.mb.mbxCommand, 5452 lpfc_sli4_mbox_opcode_get(phba, mboxq), 5453 psli->sli_flag, flag); 5454 goto out_not_finished; 5455 } 5456 5457 /* Put the mailbox command to the driver internal FIFO */ 5458 psli->slistat.mbox_busy++; 5459 spin_lock_irqsave(&phba->hbalock, iflags); 5460 lpfc_mbox_put(phba, mboxq); 5461 spin_unlock_irqrestore(&phba->hbalock, iflags); 5462 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 5463 "(%d):0354 Mbox cmd issue - Enqueue Data: " 5464 "x%x (x%x) x%x x%x x%x\n", 5465 mboxq->vport ? mboxq->vport->vpi : 0xffffff, 5466 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 5467 lpfc_sli4_mbox_opcode_get(phba, mboxq), 5468 phba->pport->port_state, 5469 psli->sli_flag, MBX_NOWAIT); 5470 /* Wake up worker thread to transport mailbox command from head */ 5471 lpfc_worker_wake_up(phba); 5472 5473 return MBX_BUSY; 5474 5475 out_not_finished: 5476 return MBX_NOT_FINISHED; 5477 } 5478 5479 /** 5480 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device 5481 * @phba: Pointer to HBA context object. 5482 * 5483 * This function is called by worker thread to send a mailbox command to 5484 * SLI4 HBA firmware. 5485 * 5486 **/ 5487 int 5488 lpfc_sli4_post_async_mbox(struct lpfc_hba *phba) 5489 { 5490 struct lpfc_sli *psli = &phba->sli; 5491 LPFC_MBOXQ_t *mboxq; 5492 int rc = MBX_SUCCESS; 5493 unsigned long iflags; 5494 struct lpfc_mqe *mqe; 5495 uint32_t mbx_cmnd; 5496 5497 /* Check interrupt mode before post async mailbox command */ 5498 if (unlikely(!phba->sli4_hba.intr_enable)) 5499 return MBX_NOT_FINISHED; 5500 5501 /* Check for mailbox command service token */ 5502 spin_lock_irqsave(&phba->hbalock, iflags); 5503 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { 5504 spin_unlock_irqrestore(&phba->hbalock, iflags); 5505 return MBX_NOT_FINISHED; 5506 } 5507 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 5508 spin_unlock_irqrestore(&phba->hbalock, iflags); 5509 return MBX_NOT_FINISHED; 5510 } 5511 if (unlikely(phba->sli.mbox_active)) { 5512 spin_unlock_irqrestore(&phba->hbalock, iflags); 5513 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 5514 "0384 There is pending active mailbox cmd\n"); 5515 return MBX_NOT_FINISHED; 5516 } 5517 /* Take the mailbox command service token */ 5518 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 5519 5520 /* Get the next mailbox command from head of queue */ 5521 mboxq = lpfc_mbox_get(phba); 5522 5523 /* If no more mailbox command waiting for post, we're done */ 5524 if (!mboxq) { 5525 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 5526 spin_unlock_irqrestore(&phba->hbalock, iflags); 5527 return MBX_SUCCESS; 5528 } 5529 phba->sli.mbox_active = mboxq; 5530 spin_unlock_irqrestore(&phba->hbalock, iflags); 5531 5532 /* Check device readiness for posting mailbox command */ 5533 rc = lpfc_mbox_dev_check(phba); 5534 if (unlikely(rc)) 5535 /* Driver clean routine will clean up pending mailbox */ 5536 goto out_not_finished; 5537 5538 /* Prepare the mbox command to be posted */ 5539 mqe = 
&mboxq->u.mqe; 5540 mbx_cmnd = bf_get(lpfc_mqe_command, mqe); 5541 5542 /* Start timer for the mbox_tmo and log some mailbox post messages */ 5543 mod_timer(&psli->mbox_tmo, (jiffies + 5544 (HZ * lpfc_mbox_tmo_val(phba, mbx_cmnd)))); 5545 5546 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 5547 "(%d):0355 Mailbox cmd x%x (x%x) issue Data: " 5548 "x%x x%x\n", 5549 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd, 5550 lpfc_sli4_mbox_opcode_get(phba, mboxq), 5551 phba->pport->port_state, psli->sli_flag); 5552 5553 if (mbx_cmnd != MBX_HEARTBEAT) { 5554 if (mboxq->vport) { 5555 lpfc_debugfs_disc_trc(mboxq->vport, 5556 LPFC_DISC_TRC_MBOX_VPORT, 5557 "MBOX Send vport: cmd:x%x mb:x%x x%x", 5558 mbx_cmnd, mqe->un.mb_words[0], 5559 mqe->un.mb_words[1]); 5560 } else { 5561 lpfc_debugfs_disc_trc(phba->pport, 5562 LPFC_DISC_TRC_MBOX, 5563 "MBOX Send: cmd:x%x mb:x%x x%x", 5564 mbx_cmnd, mqe->un.mb_words[0], 5565 mqe->un.mb_words[1]); 5566 } 5567 } 5568 psli->slistat.mbox_cmd++; 5569 5570 /* Post the mailbox command to the port */ 5571 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe); 5572 if (rc != MBX_SUCCESS) { 5573 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 5574 "(%d):2533 Mailbox command x%x (x%x) " 5575 "cannot issue Data: x%x x%x\n", 5576 mboxq->vport ? mboxq->vport->vpi : 0, 5577 mboxq->u.mb.mbxCommand, 5578 lpfc_sli4_mbox_opcode_get(phba, mboxq), 5579 psli->sli_flag, MBX_NOWAIT); 5580 goto out_not_finished; 5581 } 5582 5583 return rc; 5584 5585 out_not_finished: 5586 spin_lock_irqsave(&phba->hbalock, iflags); 5587 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; 5588 __lpfc_mbox_cmpl_put(phba, mboxq); 5589 /* Release the token */ 5590 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 5591 phba->sli.mbox_active = NULL; 5592 spin_unlock_irqrestore(&phba->hbalock, iflags); 5593 5594 return MBX_NOT_FINISHED; 5595 } 5596 5597 /** 5598 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command 5599 * @phba: Pointer to HBA context object. 5600 * @pmbox: Pointer to mailbox object. 5601 * @flag: Flag indicating how the mailbox need to be processed. 5602 * 5603 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from 5604 * the API jump table function pointer from the lpfc_hba struct. 5605 * 5606 * Return codes the caller owns the mailbox command after the return of the 5607 * function. 5608 **/ 5609 int 5610 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) 5611 { 5612 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag); 5613 } 5614 5615 /** 5616 * lpfc_mbox_api_table_setup - Set up mbox api fucntion jump table 5617 * @phba: The hba struct for which this call is being executed. 5618 * @dev_grp: The HBA PCI-Device group number. 5619 * 5620 * This routine sets up the mbox interface API function jump table in @phba 5621 * struct. 5622 * Returns: 0 - success, -ENODEV - failure. 
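 * A -ENODEV return means the PCI device group is not one this driver
 * recognizes.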
5623 **/ 5624 int 5625 lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 5626 { 5627 5628 switch (dev_grp) { 5629 case LPFC_PCI_DEV_LP: 5630 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3; 5631 phba->lpfc_sli_handle_slow_ring_event = 5632 lpfc_sli_handle_slow_ring_event_s3; 5633 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3; 5634 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3; 5635 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3; 5636 break; 5637 case LPFC_PCI_DEV_OC: 5638 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4; 5639 phba->lpfc_sli_handle_slow_ring_event = 5640 lpfc_sli_handle_slow_ring_event_s4; 5641 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4; 5642 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4; 5643 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4; 5644 break; 5645 default: 5646 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5647 "1420 Invalid HBA PCI-device group: 0x%x\n", 5648 dev_grp); 5649 return -ENODEV; 5650 break; 5651 } 5652 return 0; 5653 } 5654 5655 /** 5656 * __lpfc_sli_ringtx_put - Add an iocb to the txq 5657 * @phba: Pointer to HBA context object. 5658 * @pring: Pointer to driver SLI ring object. 5659 * @piocb: Pointer to address of newly added command iocb. 5660 * 5661 * This function is called with hbalock held to add a command 5662 * iocb to the txq when SLI layer cannot submit the command iocb 5663 * to the ring. 5664 **/ 5665 void 5666 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 5667 struct lpfc_iocbq *piocb) 5668 { 5669 /* Insert the caller's iocb in the txq tail for later processing. */ 5670 list_add_tail(&piocb->list, &pring->txq); 5671 pring->txq_cnt++; 5672 } 5673 5674 /** 5675 * lpfc_sli_next_iocb - Get the next iocb in the txq 5676 * @phba: Pointer to HBA context object. 5677 * @pring: Pointer to driver SLI ring object. 5678 * @piocb: Pointer to address of newly added command iocb. 5679 * 5680 * This function is called with hbalock held before a new 5681 * iocb is submitted to the firmware. This function checks 5682 * txq to flush the iocbs in txq to Firmware before 5683 * submitting new iocbs to the Firmware. 5684 * If there are iocbs in the txq which need to be submitted 5685 * to firmware, lpfc_sli_next_iocb returns the first element 5686 * of the txq after dequeuing it from txq. 5687 * If there is no iocb in the txq then the function will return 5688 * *piocb and *piocb is set to NULL. Caller needs to check 5689 * *piocb to find if there are more commands in the txq. 5690 **/ 5691 static struct lpfc_iocbq * 5692 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 5693 struct lpfc_iocbq **piocb) 5694 { 5695 struct lpfc_iocbq * nextiocb; 5696 5697 nextiocb = lpfc_sli_ringtx_get(phba, pring); 5698 if (!nextiocb) { 5699 nextiocb = *piocb; 5700 *piocb = NULL; 5701 } 5702 5703 return nextiocb; 5704 } 5705 5706 /** 5707 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb 5708 * @phba: Pointer to HBA context object. 5709 * @ring_number: SLI ring number to issue iocb on. 5710 * @piocb: Pointer to command iocb. 5711 * @flag: Flag indicating if this command can be put into txq. 5712 * 5713 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue 5714 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is 5715 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT 5716 * flag is turned on, the function returns IOCB_ERROR. 
When the link is down, 5717 * this function allows only iocbs for posting buffers. This function finds 5718 * next available slot in the command ring and posts the command to the 5719 * available slot and writes the port attention register to request HBA start 5720 * processing new iocb. If there is no slot available in the ring and 5721 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise 5722 * the function returns IOCB_BUSY. 5723 * 5724 * This function is called with hbalock held. The function will return success 5725 * after it successfully submit the iocb to firmware or after adding to the 5726 * txq. 5727 **/ 5728 static int 5729 __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number, 5730 struct lpfc_iocbq *piocb, uint32_t flag) 5731 { 5732 struct lpfc_iocbq *nextiocb; 5733 IOCB_t *iocb; 5734 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number]; 5735 5736 if (piocb->iocb_cmpl && (!piocb->vport) && 5737 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && 5738 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) { 5739 lpfc_printf_log(phba, KERN_ERR, 5740 LOG_SLI | LOG_VPORT, 5741 "1807 IOCB x%x failed. No vport\n", 5742 piocb->iocb.ulpCommand); 5743 dump_stack(); 5744 return IOCB_ERROR; 5745 } 5746 5747 5748 /* If the PCI channel is in offline state, do not post iocbs. */ 5749 if (unlikely(pci_channel_offline(phba->pcidev))) 5750 return IOCB_ERROR; 5751 5752 /* If HBA has a deferred error attention, fail the iocb. */ 5753 if (unlikely(phba->hba_flag & DEFER_ERATT)) 5754 return IOCB_ERROR; 5755 5756 /* 5757 * We should never get an IOCB if we are in a < LINK_DOWN state 5758 */ 5759 if (unlikely(phba->link_state < LPFC_LINK_DOWN)) 5760 return IOCB_ERROR; 5761 5762 /* 5763 * Check to see if we are blocking IOCB processing because of a 5764 * outstanding event. 5765 */ 5766 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT)) 5767 goto iocb_busy; 5768 5769 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) { 5770 /* 5771 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF 5772 * can be issued if the link is not up. 5773 */ 5774 switch (piocb->iocb.ulpCommand) { 5775 case CMD_GEN_REQUEST64_CR: 5776 case CMD_GEN_REQUEST64_CX: 5777 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) || 5778 (piocb->iocb.un.genreq64.w5.hcsw.Rctl != 5779 FC_RCTL_DD_UNSOL_CMD) || 5780 (piocb->iocb.un.genreq64.w5.hcsw.Type != 5781 MENLO_TRANSPORT_TYPE)) 5782 5783 goto iocb_busy; 5784 break; 5785 case CMD_QUE_RING_BUF_CN: 5786 case CMD_QUE_RING_BUF64_CN: 5787 /* 5788 * For IOCBs, like QUE_RING_BUF, that have no rsp ring 5789 * completion, iocb_cmpl MUST be 0. 5790 */ 5791 if (piocb->iocb_cmpl) 5792 piocb->iocb_cmpl = NULL; 5793 /*FALLTHROUGH*/ 5794 case CMD_CREATE_XRI_CR: 5795 case CMD_CLOSE_XRI_CN: 5796 case CMD_CLOSE_XRI_CX: 5797 break; 5798 default: 5799 goto iocb_busy; 5800 } 5801 5802 /* 5803 * For FCP commands, we must be in a state where we can process link 5804 * attention events. 
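 * (The LPFC_PROCESS_LA flag must be set before iocbs are accepted on
 * the FCP ring.)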
5805 */ 5806 } else if (unlikely(pring->ringno == phba->sli.fcp_ring && 5807 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) { 5808 goto iocb_busy; 5809 } 5810 5811 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && 5812 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb))) 5813 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb); 5814 5815 if (iocb) 5816 lpfc_sli_update_ring(phba, pring); 5817 else 5818 lpfc_sli_update_full_ring(phba, pring); 5819 5820 if (!piocb) 5821 return IOCB_SUCCESS; 5822 5823 goto out_busy; 5824 5825 iocb_busy: 5826 pring->stats.iocb_cmd_delay++; 5827 5828 out_busy: 5829 5830 if (!(flag & SLI_IOCB_RET_IOCB)) { 5831 __lpfc_sli_ringtx_put(phba, pring, piocb); 5832 return IOCB_SUCCESS; 5833 } 5834 5835 return IOCB_BUSY; 5836 } 5837 5838 /** 5839 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl. 5840 * @phba: Pointer to HBA context object. 5841 * @piocb: Pointer to command iocb. 5842 * @sglq: Pointer to the scatter gather queue object. 5843 * 5844 * This routine converts the bpl or bde that is in the IOCB 5845 * to a sgl list for the sli4 hardware. The physical address 5846 * of the bpl/bde is converted back to a virtual address. 5847 * If the IOCB contains a BPL then the list of BDE's is 5848 * converted to sli4_sge's. If the IOCB contains a single 5849 * BDE then it is converted to a single sli_sge. 5850 * The IOCB is still in cpu endianess so the contents of 5851 * the bpl can be used without byte swapping. 5852 * 5853 * Returns valid XRI = Success, NO_XRI = Failure. 5854 **/ 5855 static uint16_t 5856 lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, 5857 struct lpfc_sglq *sglq) 5858 { 5859 uint16_t xritag = NO_XRI; 5860 struct ulp_bde64 *bpl = NULL; 5861 struct ulp_bde64 bde; 5862 struct sli4_sge *sgl = NULL; 5863 IOCB_t *icmd; 5864 int numBdes = 0; 5865 int i = 0; 5866 5867 if (!piocbq || !sglq) 5868 return xritag; 5869 5870 sgl = (struct sli4_sge *)sglq->sgl; 5871 icmd = &piocbq->iocb; 5872 if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { 5873 numBdes = icmd->un.genreq64.bdl.bdeSize / 5874 sizeof(struct ulp_bde64); 5875 /* The addrHigh and addrLow fields within the IOCB 5876 * have not been byteswapped yet so there is no 5877 * need to swap them back. 5878 */ 5879 bpl = (struct ulp_bde64 *) 5880 ((struct lpfc_dmabuf *)piocbq->context3)->virt; 5881 5882 if (!bpl) 5883 return xritag; 5884 5885 for (i = 0; i < numBdes; i++) { 5886 /* Should already be byte swapped. */ 5887 sgl->addr_hi = bpl->addrHigh; 5888 sgl->addr_lo = bpl->addrLow; 5889 5890 if ((i+1) == numBdes) 5891 bf_set(lpfc_sli4_sge_last, sgl, 1); 5892 else 5893 bf_set(lpfc_sli4_sge_last, sgl, 0); 5894 sgl->word2 = cpu_to_le32(sgl->word2); 5895 /* swap the size field back to the cpu so we 5896 * can assign it to the sgl. 5897 */ 5898 bde.tus.w = le32_to_cpu(bpl->tus.w); 5899 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize); 5900 bpl++; 5901 sgl++; 5902 } 5903 } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) { 5904 /* The addrHigh and addrLow fields of the BDE have not 5905 * been byteswapped yet so they need to be swapped 5906 * before putting them in the sgl. 
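 * The SGE is built in little-endian form, hence the cpu_to_le32
 * conversions below.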
5907 */
5908 sgl->addr_hi =
5909 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
5910 sgl->addr_lo =
5911 cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
5912 bf_set(lpfc_sli4_sge_last, sgl, 1);
5913 sgl->word2 = cpu_to_le32(sgl->word2);
5914 sgl->sge_len =
5915 cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
5916 }
5917 return sglq->sli4_xritag;
5918 }
5919
5920 /**
5921 * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution
5922 * @phba: Pointer to HBA context object.
5923 *
5924 * This routine performs a round-robin distribution of SCSI commands across
5925 * the SLI4 FCP WQ indexes. It is called by __lpfc_sli_issue_iocb_s4() with
5926 * the hbalock held.
5927 *
5928 * Return: index into the SLI4 fast-path FCP work queue array.
5929 **/
5930 static uint32_t
5931 lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba)
5932 {
5933 ++phba->fcp_qidx;
5934 if (phba->fcp_qidx >= phba->cfg_fcp_wq_count)
5935 phba->fcp_qidx = 0;
5936
5937 return phba->fcp_qidx;
5938 }
5939
5940 /**
5941 * lpfc_sli4_iocb2wqe - Convert the IOCB to a work queue entry.
5942 * @phba: Pointer to HBA context object.
5943 * @iocbq: Pointer to command iocb.
5944 * @wqe: Pointer to the work queue entry.
5945 *
5946 * This routine converts the iocb command to its Work Queue Entry
5947 * equivalent. The wqe pointer should not have any fields set when
5948 * this routine is called because it will memcpy over them.
5949 * This routine does not set the CQ_ID or the WQEC bits in the
5950 * wqe.
5951 *
5952 * Returns: 0 = Success, IOCB_ERROR = Failure.
5953 **/
5954 static int
5955 lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
5956 union lpfc_wqe *wqe)
5957 {
5958 uint32_t xmit_len = 0, total_len = 0;
5959 uint8_t ct = 0;
5960 uint32_t fip;
5961 uint32_t abort_tag;
5962 uint8_t command_type = ELS_COMMAND_NON_FIP;
5963 uint8_t cmnd;
5964 uint16_t xritag;
5965 uint16_t abrt_iotag;
5966 struct lpfc_iocbq *abrtiocbq;
5967 struct ulp_bde64 *bpl = NULL;
5968 uint32_t els_id = LPFC_ELS_ID_DEFAULT;
5969 int numBdes, i;
5970 struct ulp_bde64 bde;
5971
5972 fip = phba->hba_flag & HBA_FIP_SUPPORT;
5973 /* The fcp commands will set command type */
5974 if (iocbq->iocb_flag & LPFC_IO_FCP)
5975 command_type = FCP_COMMAND;
5976 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
5977 command_type = ELS_COMMAND_FIP;
5978 else
5979 command_type = ELS_COMMAND_NON_FIP;
5980
5981 /* Some of the fields are in the right position already */
5982 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
5983 abort_tag = (uint32_t) iocbq->iotag;
5984 xritag = iocbq->sli4_xritag;
5985 wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */
5986 /* words0-2 bpl convert bde */
5987 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
5988 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
5989 sizeof(struct ulp_bde64);
5990 bpl = (struct ulp_bde64 *)
5991 ((struct lpfc_dmabuf *)iocbq->context3)->virt;
5992 if (!bpl)
5993 return IOCB_ERROR;
5994
5995 /* Should already be byte swapped. */
5996 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
5997 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
5998 /* swap the size field back to the cpu so we
5999 * can assign it to the wqe.
6000 */ 6001 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w); 6002 xmit_len = wqe->generic.bde.tus.f.bdeSize; 6003 total_len = 0; 6004 for (i = 0; i < numBdes; i++) { 6005 bde.tus.w = le32_to_cpu(bpl[i].tus.w); 6006 total_len += bde.tus.f.bdeSize; 6007 } 6008 } else 6009 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize; 6010 6011 iocbq->iocb.ulpIoTag = iocbq->iotag; 6012 cmnd = iocbq->iocb.ulpCommand; 6013 6014 switch (iocbq->iocb.ulpCommand) { 6015 case CMD_ELS_REQUEST64_CR: 6016 if (!iocbq->iocb.ulpLe) { 6017 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6018 "2007 Only Limited Edition cmd Format" 6019 " supported 0x%x\n", 6020 iocbq->iocb.ulpCommand); 6021 return IOCB_ERROR; 6022 } 6023 wqe->els_req.payload_len = xmit_len; 6024 /* Els_reguest64 has a TMO */ 6025 bf_set(wqe_tmo, &wqe->els_req.wqe_com, 6026 iocbq->iocb.ulpTimeout); 6027 /* Need a VF for word 4 set the vf bit*/ 6028 bf_set(els_req64_vf, &wqe->els_req, 0); 6029 /* And a VFID for word 12 */ 6030 bf_set(els_req64_vfid, &wqe->els_req, 0); 6031 /* 6032 * Set ct field to 3, indicates that the context_tag field 6033 * contains the FCFI and remote N_Port_ID is 6034 * in word 5. 6035 */ 6036 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); 6037 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 6038 iocbq->iocb.ulpContext); 6039 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct); 6040 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0); 6041 /* CCP CCPE PV PRI in word10 were set in the memcpy */ 6042 if (command_type == ELS_COMMAND_FIP) { 6043 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK) 6044 >> LPFC_FIP_ELS_ID_SHIFT); 6045 } 6046 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id); 6047 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1); 6048 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ); 6049 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1); 6050 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE); 6051 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0); 6052 break; 6053 case CMD_XMIT_SEQUENCE64_CX: 6054 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, 6055 iocbq->iocb.un.ulpWord[3]); 6056 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, 6057 iocbq->iocb.ulpContext); 6058 /* The entire sequence is transmitted for this IOCB */ 6059 xmit_len = total_len; 6060 cmnd = CMD_XMIT_SEQUENCE64_CR; 6061 case CMD_XMIT_SEQUENCE64_CR: 6062 /* word3 iocb=io_tag32 wqe=reserved */ 6063 wqe->xmit_sequence.rsvd3 = 0; 6064 /* word4 relative_offset memcpy */ 6065 /* word5 r_ctl/df_ctl memcpy */ 6066 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0); 6067 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1); 6068 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, 6069 LPFC_WQE_IOD_WRITE); 6070 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com, 6071 LPFC_WQE_LENLOC_WORD12); 6072 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0); 6073 wqe->xmit_sequence.xmit_len = xmit_len; 6074 command_type = OTHER_COMMAND; 6075 break; 6076 case CMD_XMIT_BCAST64_CN: 6077 /* word3 iocb=iotag32 wqe=seq_payload_len */ 6078 wqe->xmit_bcast64.seq_payload_len = xmit_len; 6079 /* word4 iocb=rsvd wqe=rsvd */ 6080 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */ 6081 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */ 6082 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com, 6083 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 6084 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1); 6085 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE); 6086 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com, 6087 LPFC_WQE_LENLOC_WORD3); 6088 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0); 6089 break; 
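/* The three FCP initiator opcodes below (IWRITE, IREAD, ICMND) build their
 * WQEs the same way; they differ mainly in the I/O direction and in how the
 * payload length is carried.
 */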
6090 case CMD_FCP_IWRITE64_CR: 6091 command_type = FCP_COMMAND_DATA_OUT; 6092 /* word3 iocb=iotag wqe=payload_offset_len */ 6093 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ 6094 wqe->fcp_iwrite.payload_offset_len = 6095 xmit_len + sizeof(struct fcp_rsp); 6096 /* word4 iocb=parameter wqe=total_xfer_length memcpy */ 6097 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */ 6098 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com, 6099 iocbq->iocb.ulpFCP2Rcvy); 6100 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS); 6101 /* Always open the exchange */ 6102 bf_set(wqe_xc, &wqe->fcp_iwrite.wqe_com, 0); 6103 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1); 6104 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE); 6105 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, 6106 LPFC_WQE_LENLOC_WORD4); 6107 bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0); 6108 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU); 6109 break; 6110 case CMD_FCP_IREAD64_CR: 6111 /* word3 iocb=iotag wqe=payload_offset_len */ 6112 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ 6113 wqe->fcp_iread.payload_offset_len = 6114 xmit_len + sizeof(struct fcp_rsp); 6115 /* word4 iocb=parameter wqe=total_xfer_length memcpy */ 6116 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */ 6117 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com, 6118 iocbq->iocb.ulpFCP2Rcvy); 6119 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS); 6120 /* Always open the exchange */ 6121 bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0); 6122 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1); 6123 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ); 6124 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, 6125 LPFC_WQE_LENLOC_WORD4); 6126 bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0); 6127 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU); 6128 break; 6129 case CMD_FCP_ICMND64_CR: 6130 /* word3 iocb=IO_TAG wqe=reserved */ 6131 wqe->fcp_icmd.rsrvd3 = 0; 6132 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0); 6133 /* Always open the exchange */ 6134 bf_set(wqe_xc, &wqe->fcp_icmd.wqe_com, 0); 6135 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1); 6136 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE); 6137 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1); 6138 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, 6139 LPFC_WQE_LENLOC_NONE); 6140 bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0); 6141 break; 6142 case CMD_GEN_REQUEST64_CR: 6143 /* word3 iocb=IO_TAG wqe=request_payload_len */ 6144 wqe->gen_req.request_payload_len = xmit_len; 6145 /* word4 iocb=parameter wqe=relative_offset memcpy */ 6146 /* word5 [rctl, type, df_ctl, la] copied in memcpy */ 6147 /* word6 context tag copied in memcpy */ 6148 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) { 6149 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); 6150 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6151 "2015 Invalid CT %x command 0x%x\n", 6152 ct, iocbq->iocb.ulpCommand); 6153 return IOCB_ERROR; 6154 } 6155 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0); 6156 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout); 6157 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU); 6158 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1); 6159 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ); 6160 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1); 6161 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE); 6162 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0); 6163 command_type = OTHER_COMMAND; 6164 break; 6165 case CMD_XMIT_ELS_RSP64_CX: 6166 /* words0-2 BDE memcpy 
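 * (the BDE was copied into the WQE by the memcpy of the IOCB at the
 * top of this routine)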
*/ 6167 /* word3 iocb=iotag32 wqe=response_payload_len */ 6168 wqe->xmit_els_rsp.response_payload_len = xmit_len; 6169 /* word4 iocb=did wge=rsvd. */ 6170 wqe->xmit_els_rsp.rsvd4 = 0; 6171 /* word5 iocb=rsvd wge=did */ 6172 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest, 6173 iocbq->iocb.un.elsreq64.remoteID); 6174 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 6175 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 6176 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU); 6177 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 6178 iocbq->iocb.ulpContext); 6179 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l) 6180 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 6181 iocbq->vport->vpi + phba->vpi_base); 6182 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1); 6183 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE); 6184 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1); 6185 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com, 6186 LPFC_WQE_LENLOC_WORD3); 6187 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0); 6188 command_type = OTHER_COMMAND; 6189 break; 6190 case CMD_CLOSE_XRI_CN: 6191 case CMD_ABORT_XRI_CN: 6192 case CMD_ABORT_XRI_CX: 6193 /* words 0-2 memcpy should be 0 rserved */ 6194 /* port will send abts */ 6195 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag; 6196 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) { 6197 abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag]; 6198 fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK; 6199 } else 6200 fip = 0; 6201 6202 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip) 6203 /* 6204 * The link is down, or the command was ELS_FIP 6205 * so the fw does not need to send abts 6206 * on the wire. 6207 */ 6208 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1); 6209 else 6210 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0); 6211 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG); 6212 /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */ 6213 wqe->abort_cmd.rsrvd5 = 0; 6214 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com, 6215 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 6216 abort_tag = iocbq->iocb.un.acxri.abortIoTag; 6217 /* 6218 * The abort handler will send us CMD_ABORT_XRI_CN or 6219 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX 6220 */ 6221 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX); 6222 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1); 6223 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com, 6224 LPFC_WQE_LENLOC_NONE); 6225 cmnd = CMD_ABORT_XRI_CX; 6226 command_type = OTHER_COMMAND; 6227 xritag = 0; 6228 break; 6229 case CMD_XMIT_BLS_RSP64_CX: 6230 /* As BLS ABTS-ACC WQE is very different from other WQEs, 6231 * we re-construct this WQE here based on information in 6232 * iocbq from scratch. 6233 */ 6234 memset(wqe, 0, sizeof(union lpfc_wqe)); 6235 /* OX_ID is invariable to who sent ABTS to CT exchange */ 6236 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp, 6237 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_acc)); 6238 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_acc) == 6239 LPFC_ABTS_UNSOL_INT) { 6240 /* ABTS sent by initiator to CT exchange, the 6241 * RX_ID field will be filled with the newly 6242 * allocated responder XRI. 6243 */ 6244 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp, 6245 iocbq->sli4_xritag); 6246 } else { 6247 /* ABTS sent by responder to CT exchange, the 6248 * RX_ID field will be filled with the responder 6249 * RX_ID from ABTS. 
6250 */ 6251 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp, 6252 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_acc)); 6253 } 6254 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff); 6255 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1); 6256 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com, 6257 iocbq->iocb.ulpContext); 6258 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1); 6259 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com, 6260 LPFC_WQE_LENLOC_NONE); 6261 /* Overwrite the pre-set comnd type with OTHER_COMMAND */ 6262 command_type = OTHER_COMMAND; 6263 break; 6264 case CMD_XRI_ABORTED_CX: 6265 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */ 6266 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */ 6267 case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */ 6268 case CMD_FCP_TRSP64_CX: /* Target mode rcv */ 6269 case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */ 6270 default: 6271 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6272 "2014 Invalid command 0x%x\n", 6273 iocbq->iocb.ulpCommand); 6274 return IOCB_ERROR; 6275 break; 6276 } 6277 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag); 6278 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag); 6279 wqe->generic.wqe_com.abort_tag = abort_tag; 6280 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type); 6281 bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd); 6282 bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass); 6283 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); 6284 return 0; 6285 } 6286 6287 /** 6288 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb 6289 * @phba: Pointer to HBA context object. 6290 * @ring_number: SLI ring number to issue iocb on. 6291 * @piocb: Pointer to command iocb. 6292 * @flag: Flag indicating if this command can be put into txq. 6293 * 6294 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue 6295 * an iocb command to an HBA with SLI-4 interface spec. 6296 * 6297 * This function is called with hbalock held. The function will return success 6298 * after it successfully submit the iocb to firmware or after adding to the 6299 * txq. 6300 **/ 6301 static int 6302 __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, 6303 struct lpfc_iocbq *piocb, uint32_t flag) 6304 { 6305 struct lpfc_sglq *sglq; 6306 union lpfc_wqe wqe; 6307 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number]; 6308 6309 if (piocb->sli4_xritag == NO_XRI) { 6310 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || 6311 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN) 6312 sglq = NULL; 6313 else { 6314 if (pring->txq_cnt) { 6315 if (!(flag & SLI_IOCB_RET_IOCB)) { 6316 __lpfc_sli_ringtx_put(phba, 6317 pring, piocb); 6318 return IOCB_SUCCESS; 6319 } else { 6320 return IOCB_BUSY; 6321 } 6322 } else { 6323 sglq = __lpfc_sli_get_sglq(phba); 6324 if (!sglq) { 6325 if (!(flag & SLI_IOCB_RET_IOCB)) { 6326 __lpfc_sli_ringtx_put(phba, 6327 pring, 6328 piocb); 6329 return IOCB_SUCCESS; 6330 } else 6331 return IOCB_BUSY; 6332 } 6333 } 6334 } 6335 } else if (piocb->iocb_flag & LPFC_IO_FCP) { 6336 sglq = NULL; /* These IO's already have an XRI and 6337 * a mapped sgl. 
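 * so no sglq is taken from the free list for them.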
6338 */
6339 } else {
6340 /* This is a continuation of a command (CX) so this
6341 * sglq is on the active list
6342 */
6343 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_xritag);
6344 if (!sglq)
6345 return IOCB_ERROR;
6346 }
6347
6348 if (sglq) {
6349 piocb->sli4_xritag = sglq->sli4_xritag;
6350
6351 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
6352 return IOCB_ERROR;
6353 }
6354
6355 if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
6356 return IOCB_ERROR;
6357
6358 if ((piocb->iocb_flag & LPFC_IO_FCP) ||
6359 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
6360 /*
6361 * For FCP command IOCB, get a new WQ index to distribute
6362 * WQE across the WQs. On the other hand, for abort IOCB,
6363 * it carries the same WQ index to the original command
6364 * IOCB.
6365 */
6366 if (piocb->iocb_flag & LPFC_IO_FCP)
6367 piocb->fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba);
6368 if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx],
6369 &wqe))
6370 return IOCB_ERROR;
6371 } else {
6372 if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
6373 return IOCB_ERROR;
6374 }
6375 lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
6376
6377 return 0;
6378 }
6379
6380 /**
6381 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
6382 *
6383 * This routine wraps the actual lockless IOCB issue routine, invoking it
6384 * through the function pointer in the lpfc_hba struct.
6385 *
6386 * Return codes:
6387 * IOCB_ERROR - Error
6388 * IOCB_SUCCESS - Success
6389 * IOCB_BUSY - Busy
6390 **/
6391 int
6392 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
6393 struct lpfc_iocbq *piocb, uint32_t flag)
6394 {
6395 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
6396 }
6397
6398 /**
6399 * lpfc_sli_api_table_setup - Set up sli api function jump table
6400 * @phba: The hba struct for which this call is being executed.
6401 * @dev_grp: The HBA PCI-Device group number.
6402 *
6403 * This routine sets up the SLI interface API function jump table in @phba
6404 * struct.
6405 * Returns: 0 - success, -ENODEV - failure.
6406 **/
6407 int
6408 lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
6409 {
6410
6411 switch (dev_grp) {
6412 case LPFC_PCI_DEV_LP:
6413 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
6414 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
6415 break;
6416 case LPFC_PCI_DEV_OC:
6417 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
6418 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
6419 break;
6420 default:
6421 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6422 "1419 Invalid HBA PCI-device group: 0x%x\n",
6423 dev_grp);
6424 return -ENODEV;
6425 break;
6426 }
6427 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
6428 return 0;
6429 }
6430
6431 /**
6432 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
6433 * @phba: Pointer to HBA context object.
6434 * @ring_number: SLI ring number to issue iocb on.
6435 * @piocb: Pointer to command iocb.
6436 * @flag: Flag indicating if this command can be put into txq.
6437 *
6438 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb
6439 * function. This function gets the hbalock and calls
6440 * __lpfc_sli_issue_iocb function and will return the error returned
6441 * by __lpfc_sli_issue_iocb function. This wrapper is used by
6442 * functions which do not hold hbalock.
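 * Callers that already hold the hbalock are expected to call
 * __lpfc_sli_issue_iocb directly.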
6443 **/ 6444 int 6445 lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, 6446 struct lpfc_iocbq *piocb, uint32_t flag) 6447 { 6448 unsigned long iflags; 6449 int rc; 6450 6451 spin_lock_irqsave(&phba->hbalock, iflags); 6452 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); 6453 spin_unlock_irqrestore(&phba->hbalock, iflags); 6454 6455 return rc; 6456 } 6457 6458 /** 6459 * lpfc_extra_ring_setup - Extra ring setup function 6460 * @phba: Pointer to HBA context object. 6461 * 6462 * This function is called while driver attaches with the 6463 * HBA to setup the extra ring. The extra ring is used 6464 * only when driver needs to support target mode functionality 6465 * or IP over FC functionalities. 6466 * 6467 * This function is called with no lock held. 6468 **/ 6469 static int 6470 lpfc_extra_ring_setup( struct lpfc_hba *phba) 6471 { 6472 struct lpfc_sli *psli; 6473 struct lpfc_sli_ring *pring; 6474 6475 psli = &phba->sli; 6476 6477 /* Adjust cmd/rsp ring iocb entries more evenly */ 6478 6479 /* Take some away from the FCP ring */ 6480 pring = &psli->ring[psli->fcp_ring]; 6481 pring->numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES; 6482 pring->numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES; 6483 pring->numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES; 6484 pring->numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES; 6485 6486 /* and give them to the extra ring */ 6487 pring = &psli->ring[psli->extra_ring]; 6488 6489 pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES; 6490 pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES; 6491 pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES; 6492 pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES; 6493 6494 /* Setup default profile for this ring */ 6495 pring->iotag_max = 4096; 6496 pring->num_mask = 1; 6497 pring->prt[0].profile = 0; /* Mask 0 */ 6498 pring->prt[0].rctl = phba->cfg_multi_ring_rctl; 6499 pring->prt[0].type = phba->cfg_multi_ring_type; 6500 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL; 6501 return 0; 6502 } 6503 6504 /** 6505 * lpfc_sli_async_event_handler - ASYNC iocb handler function 6506 * @phba: Pointer to HBA context object. 6507 * @pring: Pointer to driver SLI ring object. 6508 * @iocbq: Pointer to iocb object. 6509 * 6510 * This function is called by the slow ring event handler 6511 * function when there is an ASYNC event iocb in the ring. 6512 * This function is called with no lock held. 6513 * Currently this function handles only temperature related 6514 * ASYNC events. The function decodes the temperature sensor 6515 * event message and posts events for the management applications. 
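 * Any other ASYNC event code is logged together with the raw IOCB words
 * and then ignored.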
6516 **/ 6517 static void 6518 lpfc_sli_async_event_handler(struct lpfc_hba * phba, 6519 struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq) 6520 { 6521 IOCB_t *icmd; 6522 uint16_t evt_code; 6523 uint16_t temp; 6524 struct temp_event temp_event_data; 6525 struct Scsi_Host *shost; 6526 uint32_t *iocb_w; 6527 6528 icmd = &iocbq->iocb; 6529 evt_code = icmd->un.asyncstat.evt_code; 6530 temp = icmd->ulpContext; 6531 6532 if ((evt_code != ASYNC_TEMP_WARN) && 6533 (evt_code != ASYNC_TEMP_SAFE)) { 6534 iocb_w = (uint32_t *) icmd; 6535 lpfc_printf_log(phba, 6536 KERN_ERR, 6537 LOG_SLI, 6538 "0346 Ring %d handler: unexpected ASYNC_STATUS" 6539 " evt_code 0x%x\n" 6540 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n" 6541 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n" 6542 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n" 6543 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n", 6544 pring->ringno, 6545 icmd->un.asyncstat.evt_code, 6546 iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3], 6547 iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7], 6548 iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11], 6549 iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]); 6550 6551 return; 6552 } 6553 temp_event_data.data = (uint32_t)temp; 6554 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 6555 if (evt_code == ASYNC_TEMP_WARN) { 6556 temp_event_data.event_code = LPFC_THRESHOLD_TEMP; 6557 lpfc_printf_log(phba, 6558 KERN_ERR, 6559 LOG_TEMP, 6560 "0347 Adapter is very hot, please take " 6561 "corrective action. temperature : %d Celsius\n", 6562 temp); 6563 } 6564 if (evt_code == ASYNC_TEMP_SAFE) { 6565 temp_event_data.event_code = LPFC_NORMAL_TEMP; 6566 lpfc_printf_log(phba, 6567 KERN_ERR, 6568 LOG_TEMP, 6569 "0340 Adapter temperature is OK now. " 6570 "temperature : %d Celsius\n", 6571 temp); 6572 } 6573 6574 /* Send temperature change event to applications */ 6575 shost = lpfc_shost_from_vport(phba->pport); 6576 fc_host_post_vendor_event(shost, fc_get_event_number(), 6577 sizeof(temp_event_data), (char *) &temp_event_data, 6578 LPFC_NL_VENDOR_ID); 6579 6580 } 6581 6582 6583 /** 6584 * lpfc_sli_setup - SLI ring setup function 6585 * @phba: Pointer to HBA context object. 6586 * 6587 * lpfc_sli_setup sets up rings of the SLI interface with 6588 * number of iocbs per ring and iotags. This function is 6589 * called while driver attach to the HBA and before the 6590 * interrupts are enabled. So there is no need for locking. 6591 * 6592 * This function always returns 0. 6593 **/ 6594 int 6595 lpfc_sli_setup(struct lpfc_hba *phba) 6596 { 6597 int i, totiocbsize = 0; 6598 struct lpfc_sli *psli = &phba->sli; 6599 struct lpfc_sli_ring *pring; 6600 6601 psli->num_rings = MAX_CONFIGURED_RINGS; 6602 psli->sli_flag = 0; 6603 psli->fcp_ring = LPFC_FCP_RING; 6604 psli->next_ring = LPFC_FCP_NEXT_RING; 6605 psli->extra_ring = LPFC_EXTRA_RING; 6606 6607 psli->iocbq_lookup = NULL; 6608 psli->iocbq_lookup_len = 0; 6609 psli->last_iotag = 0; 6610 6611 for (i = 0; i < psli->num_rings; i++) { 6612 pring = &psli->ring[i]; 6613 switch (i) { 6614 case LPFC_FCP_RING: /* ring 0 - FCP */ 6615 /* numCiocb and numRiocb are used in config_port */ 6616 pring->numCiocb = SLI2_IOCB_CMD_R0_ENTRIES; 6617 pring->numRiocb = SLI2_IOCB_RSP_R0_ENTRIES; 6618 pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES; 6619 pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES; 6620 pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES; 6621 pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES; 6622 pring->sizeCiocb = (phba->sli_rev == 3) ? 
6623 SLI3_IOCB_CMD_SIZE : 6624 SLI2_IOCB_CMD_SIZE; 6625 pring->sizeRiocb = (phba->sli_rev == 3) ? 6626 SLI3_IOCB_RSP_SIZE : 6627 SLI2_IOCB_RSP_SIZE; 6628 pring->iotag_ctr = 0; 6629 pring->iotag_max = 6630 (phba->cfg_hba_queue_depth * 2); 6631 pring->fast_iotag = pring->iotag_max; 6632 pring->num_mask = 0; 6633 break; 6634 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */ 6635 /* numCiocb and numRiocb are used in config_port */ 6636 pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES; 6637 pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES; 6638 pring->sizeCiocb = (phba->sli_rev == 3) ? 6639 SLI3_IOCB_CMD_SIZE : 6640 SLI2_IOCB_CMD_SIZE; 6641 pring->sizeRiocb = (phba->sli_rev == 3) ? 6642 SLI3_IOCB_RSP_SIZE : 6643 SLI2_IOCB_RSP_SIZE; 6644 pring->iotag_max = phba->cfg_hba_queue_depth; 6645 pring->num_mask = 0; 6646 break; 6647 case LPFC_ELS_RING: /* ring 2 - ELS / CT */ 6648 /* numCiocb and numRiocb are used in config_port */ 6649 pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES; 6650 pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES; 6651 pring->sizeCiocb = (phba->sli_rev == 3) ? 6652 SLI3_IOCB_CMD_SIZE : 6653 SLI2_IOCB_CMD_SIZE; 6654 pring->sizeRiocb = (phba->sli_rev == 3) ? 6655 SLI3_IOCB_RSP_SIZE : 6656 SLI2_IOCB_RSP_SIZE; 6657 pring->fast_iotag = 0; 6658 pring->iotag_ctr = 0; 6659 pring->iotag_max = 4096; 6660 pring->lpfc_sli_rcv_async_status = 6661 lpfc_sli_async_event_handler; 6662 pring->num_mask = LPFC_MAX_RING_MASK; 6663 pring->prt[0].profile = 0; /* Mask 0 */ 6664 pring->prt[0].rctl = FC_RCTL_ELS_REQ; 6665 pring->prt[0].type = FC_TYPE_ELS; 6666 pring->prt[0].lpfc_sli_rcv_unsol_event = 6667 lpfc_els_unsol_event; 6668 pring->prt[1].profile = 0; /* Mask 1 */ 6669 pring->prt[1].rctl = FC_RCTL_ELS_REP; 6670 pring->prt[1].type = FC_TYPE_ELS; 6671 pring->prt[1].lpfc_sli_rcv_unsol_event = 6672 lpfc_els_unsol_event; 6673 pring->prt[2].profile = 0; /* Mask 2 */ 6674 /* NameServer Inquiry */ 6675 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL; 6676 /* NameServer */ 6677 pring->prt[2].type = FC_TYPE_CT; 6678 pring->prt[2].lpfc_sli_rcv_unsol_event = 6679 lpfc_ct_unsol_event; 6680 pring->prt[3].profile = 0; /* Mask 3 */ 6681 /* NameServer response */ 6682 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL; 6683 /* NameServer */ 6684 pring->prt[3].type = FC_TYPE_CT; 6685 pring->prt[3].lpfc_sli_rcv_unsol_event = 6686 lpfc_ct_unsol_event; 6687 /* abort unsolicited sequence */ 6688 pring->prt[4].profile = 0; /* Mask 4 */ 6689 pring->prt[4].rctl = FC_RCTL_BA_ABTS; 6690 pring->prt[4].type = FC_TYPE_BLS; 6691 pring->prt[4].lpfc_sli_rcv_unsol_event = 6692 lpfc_sli4_ct_abort_unsol_event; 6693 break; 6694 } 6695 totiocbsize += (pring->numCiocb * pring->sizeCiocb) + 6696 (pring->numRiocb * pring->sizeRiocb); 6697 } 6698 if (totiocbsize > MAX_SLIM_IOCB_SIZE) { 6699 /* Too many cmd / rsp ring entries in SLI2 SLIM */ 6700 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in " 6701 "SLI2 SLIM Data: x%x x%lx\n", 6702 phba->brd_no, totiocbsize, 6703 (unsigned long) MAX_SLIM_IOCB_SIZE); 6704 } 6705 if (phba->cfg_multi_ring_support == 2) 6706 lpfc_extra_ring_setup(phba); 6707 6708 return 0; 6709 } 6710 6711 /** 6712 * lpfc_sli_queue_setup - Queue initialization function 6713 * @phba: Pointer to HBA context object. 6714 * 6715 * lpfc_sli_queue_setup sets up mailbox queues and iocb queues for each 6716 * ring. This function also initializes ring indices of each ring. 6717 * This function is called during the initialization of the SLI 6718 * interface of an HBA. 6719 * This function is called with no lock held and always returns 6720 * 1. 
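 * The hbalock is held internally while the ring lists are initialized.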
6721 **/ 6722 int 6723 lpfc_sli_queue_setup(struct lpfc_hba *phba) 6724 { 6725 struct lpfc_sli *psli; 6726 struct lpfc_sli_ring *pring; 6727 int i; 6728 6729 psli = &phba->sli; 6730 spin_lock_irq(&phba->hbalock); 6731 INIT_LIST_HEAD(&psli->mboxq); 6732 INIT_LIST_HEAD(&psli->mboxq_cmpl); 6733 /* Initialize list headers for txq and txcmplq as double linked lists */ 6734 for (i = 0; i < psli->num_rings; i++) { 6735 pring = &psli->ring[i]; 6736 pring->ringno = i; 6737 pring->next_cmdidx = 0; 6738 pring->local_getidx = 0; 6739 pring->cmdidx = 0; 6740 INIT_LIST_HEAD(&pring->txq); 6741 INIT_LIST_HEAD(&pring->txcmplq); 6742 INIT_LIST_HEAD(&pring->iocb_continueq); 6743 INIT_LIST_HEAD(&pring->iocb_continue_saveq); 6744 INIT_LIST_HEAD(&pring->postbufq); 6745 } 6746 spin_unlock_irq(&phba->hbalock); 6747 return 1; 6748 } 6749 6750 /** 6751 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system 6752 * @phba: Pointer to HBA context object. 6753 * 6754 * This routine flushes the mailbox command subsystem. It will unconditionally 6755 * flush all the mailbox commands in the three possible stages in the mailbox 6756 * command sub-system: pending mailbox command queue; the outstanding mailbox 6757 * command; and completed mailbox command queue. It is caller's responsibility 6758 * to make sure that the driver is in the proper state to flush the mailbox 6759 * command sub-system. Namely, the posting of mailbox commands into the 6760 * pending mailbox command queue from the various clients must be stopped; 6761 * either the HBA is in a state that it will never works on the outstanding 6762 * mailbox command (such as in EEH or ERATT conditions) or the outstanding 6763 * mailbox command has been completed. 6764 **/ 6765 static void 6766 lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba) 6767 { 6768 LIST_HEAD(completions); 6769 struct lpfc_sli *psli = &phba->sli; 6770 LPFC_MBOXQ_t *pmb; 6771 unsigned long iflag; 6772 6773 /* Flush all the mailbox commands in the mbox system */ 6774 spin_lock_irqsave(&phba->hbalock, iflag); 6775 /* The pending mailbox command queue */ 6776 list_splice_init(&phba->sli.mboxq, &completions); 6777 /* The outstanding active mailbox command */ 6778 if (psli->mbox_active) { 6779 list_add_tail(&psli->mbox_active->list, &completions); 6780 psli->mbox_active = NULL; 6781 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 6782 } 6783 /* The completed mailbox command queue */ 6784 list_splice_init(&phba->sli.mboxq_cmpl, &completions); 6785 spin_unlock_irqrestore(&phba->hbalock, iflag); 6786 6787 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */ 6788 while (!list_empty(&completions)) { 6789 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list); 6790 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED; 6791 if (pmb->mbox_cmpl) 6792 pmb->mbox_cmpl(phba, pmb); 6793 } 6794 } 6795 6796 /** 6797 * lpfc_sli_host_down - Vport cleanup function 6798 * @vport: Pointer to virtual port object. 6799 * 6800 * lpfc_sli_host_down is called to clean up the resources 6801 * associated with a vport before destroying virtual 6802 * port data structures. 6803 * This function does following operations: 6804 * - Free discovery resources associated with this virtual 6805 * port. 6806 * - Free iocbs associated with this virtual port in 6807 * the txq. 6808 * - Send abort for all iocb commands associated with this 6809 * vport in txcmplq. 6810 * 6811 * This function is called with no lock held and always returns 1. 
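 * txq entries belonging to the vport are failed with
 * IOSTAT_LOCAL_REJECT/IOERR_SLI_DOWN, while entries on the txcmplq are
 * aborted through lpfc_sli_issue_abort_iotag.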
6812 **/ 6813 int 6814 lpfc_sli_host_down(struct lpfc_vport *vport) 6815 { 6816 LIST_HEAD(completions); 6817 struct lpfc_hba *phba = vport->phba; 6818 struct lpfc_sli *psli = &phba->sli; 6819 struct lpfc_sli_ring *pring; 6820 struct lpfc_iocbq *iocb, *next_iocb; 6821 int i; 6822 unsigned long flags = 0; 6823 uint16_t prev_pring_flag; 6824 6825 lpfc_cleanup_discovery_resources(vport); 6826 6827 spin_lock_irqsave(&phba->hbalock, flags); 6828 for (i = 0; i < psli->num_rings; i++) { 6829 pring = &psli->ring[i]; 6830 prev_pring_flag = pring->flag; 6831 /* Only slow rings */ 6832 if (pring->ringno == LPFC_ELS_RING) { 6833 pring->flag |= LPFC_DEFERRED_RING_EVENT; 6834 /* Set the lpfc data pending flag */ 6835 set_bit(LPFC_DATA_READY, &phba->data_flags); 6836 } 6837 /* 6838 * Error everything on the txq since these iocbs have not been 6839 * given to the FW yet. 6840 */ 6841 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { 6842 if (iocb->vport != vport) 6843 continue; 6844 list_move_tail(&iocb->list, &completions); 6845 pring->txq_cnt--; 6846 } 6847 6848 /* Next issue ABTS for everything on the txcmplq */ 6849 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, 6850 list) { 6851 if (iocb->vport != vport) 6852 continue; 6853 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 6854 } 6855 6856 pring->flag = prev_pring_flag; 6857 } 6858 6859 spin_unlock_irqrestore(&phba->hbalock, flags); 6860 6861 /* Cancel all the IOCBs from the completions list */ 6862 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 6863 IOERR_SLI_DOWN); 6864 return 1; 6865 } 6866 6867 /** 6868 * lpfc_sli_hba_down - Resource cleanup function for the HBA 6869 * @phba: Pointer to HBA context object. 6870 * 6871 * This function cleans up all iocb, buffers, mailbox commands 6872 * while shutting down the HBA. This function is called with no 6873 * lock held and always returns 1. 6874 * This function does the following to cleanup driver resources: 6875 * - Free discovery resources for each virtual port 6876 * - Cleanup any pending fabric iocbs 6877 * - Iterate through the iocb txq and free each entry 6878 * in the list. 6879 * - Free up any buffer posted to the HBA 6880 * - Free mailbox commands in the mailbox queue. 6881 **/ 6882 int 6883 lpfc_sli_hba_down(struct lpfc_hba *phba) 6884 { 6885 LIST_HEAD(completions); 6886 struct lpfc_sli *psli = &phba->sli; 6887 struct lpfc_sli_ring *pring; 6888 struct lpfc_dmabuf *buf_ptr; 6889 unsigned long flags = 0; 6890 int i; 6891 6892 /* Shutdown the mailbox command sub-system */ 6893 lpfc_sli_mbox_sys_shutdown(phba); 6894 6895 lpfc_hba_down_prep(phba); 6896 6897 lpfc_fabric_abort_hba(phba); 6898 6899 spin_lock_irqsave(&phba->hbalock, flags); 6900 for (i = 0; i < psli->num_rings; i++) { 6901 pring = &psli->ring[i]; 6902 /* Only slow rings */ 6903 if (pring->ringno == LPFC_ELS_RING) { 6904 pring->flag |= LPFC_DEFERRED_RING_EVENT; 6905 /* Set the lpfc data pending flag */ 6906 set_bit(LPFC_DATA_READY, &phba->data_flags); 6907 } 6908 6909 /* 6910 * Error everything on the txq since these iocbs have not been 6911 * given to the FW yet. 
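 * They are spliced onto a local completions list and failed below with
 * IOSTAT_LOCAL_REJECT/IOERR_SLI_DOWN.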
6912 */ 6913 list_splice_init(&pring->txq, &completions); 6914 pring->txq_cnt = 0; 6915 6916 } 6917 spin_unlock_irqrestore(&phba->hbalock, flags); 6918 6919 /* Cancel all the IOCBs from the completions list */ 6920 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 6921 IOERR_SLI_DOWN); 6922 6923 spin_lock_irqsave(&phba->hbalock, flags); 6924 list_splice_init(&phba->elsbuf, &completions); 6925 phba->elsbuf_cnt = 0; 6926 phba->elsbuf_prev_cnt = 0; 6927 spin_unlock_irqrestore(&phba->hbalock, flags); 6928 6929 while (!list_empty(&completions)) { 6930 list_remove_head(&completions, buf_ptr, 6931 struct lpfc_dmabuf, list); 6932 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 6933 kfree(buf_ptr); 6934 } 6935 6936 /* Return any active mbox cmds */ 6937 del_timer_sync(&psli->mbox_tmo); 6938 6939 spin_lock_irqsave(&phba->pport->work_port_lock, flags); 6940 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 6941 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags); 6942 6943 return 1; 6944 } 6945 6946 /** 6947 * lpfc_sli_pcimem_bcopy - SLI memory copy function 6948 * @srcp: Source memory pointer. 6949 * @destp: Destination memory pointer. 6950 * @cnt: Number of words required to be copied. 6951 * 6952 * This function is used for copying data between driver memory 6953 * and the SLI memory. This function also changes the endianness 6954 * of each word if native endianness is different from SLI 6955 * endianness. This function can be called with or without 6956 * lock. 6957 **/ 6958 void 6959 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt) 6960 { 6961 uint32_t *src = srcp; 6962 uint32_t *dest = destp; 6963 uint32_t ldata; 6964 int i; 6965 6966 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) { 6967 ldata = *src; 6968 ldata = le32_to_cpu(ldata); 6969 *dest = ldata; 6970 src++; 6971 dest++; 6972 } 6973 } 6974 6975 6976 /** 6977 * lpfc_sli_bemem_bcopy - SLI memory copy function 6978 * @srcp: Source memory pointer. 6979 * @destp: Destination memory pointer. 6980 * @cnt: Number of words required to be copied. 6981 * 6982 * This function is used for copying data between a data structure 6983 * with big endian representation to local endianness. 6984 * This function can be called with or without lock. 6985 **/ 6986 void 6987 lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt) 6988 { 6989 uint32_t *src = srcp; 6990 uint32_t *dest = destp; 6991 uint32_t ldata; 6992 int i; 6993 6994 for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) { 6995 ldata = *src; 6996 ldata = be32_to_cpu(ldata); 6997 *dest = ldata; 6998 src++; 6999 dest++; 7000 } 7001 } 7002 7003 /** 7004 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq 7005 * @phba: Pointer to HBA context object. 7006 * @pring: Pointer to driver SLI ring object. 7007 * @mp: Pointer to driver buffer object. 7008 * 7009 * This function is called with no lock held. 7010 * It always return zero after adding the buffer to the postbufq 7011 * buffer list. 7012 **/ 7013 int 7014 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 7015 struct lpfc_dmabuf *mp) 7016 { 7017 /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up 7018 later */ 7019 spin_lock_irq(&phba->hbalock); 7020 list_add_tail(&mp->list, &pring->postbufq); 7021 pring->postbufq_cnt++; 7022 spin_unlock_irq(&phba->hbalock); 7023 return 0; 7024 } 7025 7026 /** 7027 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer 7028 * @phba: Pointer to HBA context object. 
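 *
 * Typical flow (an illustrative sketch, not a verbatim caller from this
 * driver; "mp" is a hypothetical struct lpfc_dmabuf owned by the caller
 * and "tag" is the value later reported back in the response iocb):
 *
 *	mp->buffer_tag = lpfc_sli_get_buffer_tag(phba);
 *	lpfc_sli_ringpostbuf_put(phba, pring, mp);
 *	// ... later the CMD_IOCB_RET_XRI64_CX response carries the tag ...
 *	mp = lpfc_sli_ring_taggedbuf_get(phba, pring, tag);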
7029 * 7030 * When HBQ is enabled, buffers are searched based on tags. This function 7031 * allocates a tag for buffer posted using CMD_QUE_XRI64_CX iocb. The 7032 * tag is bit wise or-ed with QUE_BUFTAG_BIT to make sure that the tag 7033 * does not conflict with tags of buffer posted for unsolicited events. 7034 * The function returns the allocated tag. The function is called with 7035 * no locks held. 7036 **/ 7037 uint32_t 7038 lpfc_sli_get_buffer_tag(struct lpfc_hba *phba) 7039 { 7040 spin_lock_irq(&phba->hbalock); 7041 phba->buffer_tag_count++; 7042 /* 7043 * Always set the QUE_BUFTAG_BIT to distiguish between 7044 * a tag assigned by HBQ. 7045 */ 7046 phba->buffer_tag_count |= QUE_BUFTAG_BIT; 7047 spin_unlock_irq(&phba->hbalock); 7048 return phba->buffer_tag_count; 7049 } 7050 7051 /** 7052 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag 7053 * @phba: Pointer to HBA context object. 7054 * @pring: Pointer to driver SLI ring object. 7055 * @tag: Buffer tag. 7056 * 7057 * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq 7058 * list. After HBA DMA data to these buffers, CMD_IOCB_RET_XRI64_CX 7059 * iocb is posted to the response ring with the tag of the buffer. 7060 * This function searches the pring->postbufq list using the tag 7061 * to find buffer associated with CMD_IOCB_RET_XRI64_CX 7062 * iocb. If the buffer is found then lpfc_dmabuf object of the 7063 * buffer is returned to the caller else NULL is returned. 7064 * This function is called with no lock held. 7065 **/ 7066 struct lpfc_dmabuf * 7067 lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 7068 uint32_t tag) 7069 { 7070 struct lpfc_dmabuf *mp, *next_mp; 7071 struct list_head *slp = &pring->postbufq; 7072 7073 /* Search postbufq, from the begining, looking for a match on tag */ 7074 spin_lock_irq(&phba->hbalock); 7075 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 7076 if (mp->buffer_tag == tag) { 7077 list_del_init(&mp->list); 7078 pring->postbufq_cnt--; 7079 spin_unlock_irq(&phba->hbalock); 7080 return mp; 7081 } 7082 } 7083 7084 spin_unlock_irq(&phba->hbalock); 7085 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7086 "0402 Cannot find virtual addr for buffer tag on " 7087 "ring %d Data x%lx x%p x%p x%x\n", 7088 pring->ringno, (unsigned long) tag, 7089 slp->next, slp->prev, pring->postbufq_cnt); 7090 7091 return NULL; 7092 } 7093 7094 /** 7095 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events 7096 * @phba: Pointer to HBA context object. 7097 * @pring: Pointer to driver SLI ring object. 7098 * @phys: DMA address of the buffer. 7099 * 7100 * This function searches the buffer list using the dma_address 7101 * of unsolicited event to find the driver's lpfc_dmabuf object 7102 * corresponding to the dma_address. The function returns the 7103 * lpfc_dmabuf object if a buffer is found else it returns NULL. 7104 * This function is called by the ct and els unsolicited event 7105 * handlers to get the buffer associated with the unsolicited 7106 * event. 7107 * 7108 * This function is called with no lock held. 
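 *
 * Minimal usage sketch (illustrative only; the DMA address would be
 * taken from the received iocb's buffer descriptor, which is not shown):
 *
 *	dma_addr_t phys = ...;	// address reported in the unsolicited iocb
 *	struct lpfc_dmabuf *mp;
 *
 *	mp = lpfc_sli_ringpostbuf_get(phba, pring, phys);
 *	if (!mp)
 *		// no posted buffer matches this address; drop the event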
7109 **/ 7110 struct lpfc_dmabuf * 7111 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 7112 dma_addr_t phys) 7113 { 7114 struct lpfc_dmabuf *mp, *next_mp; 7115 struct list_head *slp = &pring->postbufq; 7116 7117 /* Search postbufq, from the begining, looking for a match on phys */ 7118 spin_lock_irq(&phba->hbalock); 7119 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 7120 if (mp->phys == phys) { 7121 list_del_init(&mp->list); 7122 pring->postbufq_cnt--; 7123 spin_unlock_irq(&phba->hbalock); 7124 return mp; 7125 } 7126 } 7127 7128 spin_unlock_irq(&phba->hbalock); 7129 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7130 "0410 Cannot find virtual addr for mapped buf on " 7131 "ring %d Data x%llx x%p x%p x%x\n", 7132 pring->ringno, (unsigned long long)phys, 7133 slp->next, slp->prev, pring->postbufq_cnt); 7134 return NULL; 7135 } 7136 7137 /** 7138 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs 7139 * @phba: Pointer to HBA context object. 7140 * @cmdiocb: Pointer to driver command iocb object. 7141 * @rspiocb: Pointer to driver response iocb object. 7142 * 7143 * This function is the completion handler for the abort iocbs for 7144 * ELS commands. This function is called from the ELS ring event 7145 * handler with no lock held. This function frees memory resources 7146 * associated with the abort iocb. 7147 **/ 7148 static void 7149 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 7150 struct lpfc_iocbq *rspiocb) 7151 { 7152 IOCB_t *irsp = &rspiocb->iocb; 7153 uint16_t abort_iotag, abort_context; 7154 struct lpfc_iocbq *abort_iocb; 7155 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 7156 7157 abort_iocb = NULL; 7158 7159 if (irsp->ulpStatus) { 7160 abort_context = cmdiocb->iocb.un.acxri.abortContextTag; 7161 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag; 7162 7163 spin_lock_irq(&phba->hbalock); 7164 if (phba->sli_rev < LPFC_SLI_REV4) { 7165 if (abort_iotag != 0 && 7166 abort_iotag <= phba->sli.last_iotag) 7167 abort_iocb = 7168 phba->sli.iocbq_lookup[abort_iotag]; 7169 } else 7170 /* For sli4 the abort_tag is the XRI, 7171 * so the abort routine puts the iotag of the iocb 7172 * being aborted in the context field of the abort 7173 * IOCB. 7174 */ 7175 abort_iocb = phba->sli.iocbq_lookup[abort_context]; 7176 7177 /* 7178 * If the iocb is not found in Firmware queue the iocb 7179 * might have completed already. Do not free it again. 7180 */ 7181 if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) { 7182 if (irsp->un.ulpWord[4] != IOERR_NO_XRI) { 7183 spin_unlock_irq(&phba->hbalock); 7184 lpfc_sli_release_iocbq(phba, cmdiocb); 7185 return; 7186 } 7187 /* For SLI4 the ulpContext field for abort IOCB 7188 * holds the iotag of the IOCB being aborted so 7189 * the local abort_context needs to be reset to 7190 * match the aborted IOCBs ulpContext. 7191 */ 7192 if (abort_iocb && phba->sli_rev == LPFC_SLI_REV4) 7193 abort_context = abort_iocb->iocb.ulpContext; 7194 } 7195 7196 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI, 7197 "0327 Cannot abort els iocb %p " 7198 "with tag %x context %x, abort status %x, " 7199 "abort code %x\n", 7200 abort_iocb, abort_iotag, abort_context, 7201 irsp->ulpStatus, irsp->un.ulpWord[4]); 7202 /* 7203 * make sure we have the right iocbq before taking it 7204 * off the txcmplq and try to call completion routine. 
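		 * Three conditions must hold before the aborted iocb is
		 * completed here: the lookup found it (abort_iocb is not
		 * NULL), its ulpContext matches the abort, and it is still
		 * marked LPFC_DRIVER_ABORTED.  Otherwise it has already
		 * completed (or is not ours) and only the lock is released.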
7205 */ 7206 if (!abort_iocb || 7207 abort_iocb->iocb.ulpContext != abort_context || 7208 (abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) == 0) 7209 spin_unlock_irq(&phba->hbalock); 7210 else if (phba->sli_rev < LPFC_SLI_REV4) { 7211 /* 7212 * leave the SLI4 aborted command on the txcmplq 7213 * list and the command complete WCQE's XB bit 7214 * will tell whether the SGL (XRI) can be released 7215 * immediately or to the aborted SGL list for the 7216 * following abort XRI from the HBA. 7217 */ 7218 list_del_init(&abort_iocb->list); 7219 if (abort_iocb->iocb_flag & LPFC_IO_ON_Q) { 7220 abort_iocb->iocb_flag &= ~LPFC_IO_ON_Q; 7221 pring->txcmplq_cnt--; 7222 } 7223 7224 /* Firmware could still be in progress of DMAing 7225 * payload, so don't free data buffer till after 7226 * a hbeat. 7227 */ 7228 abort_iocb->iocb_flag |= LPFC_DELAY_MEM_FREE; 7229 abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED; 7230 spin_unlock_irq(&phba->hbalock); 7231 7232 abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT; 7233 abort_iocb->iocb.un.ulpWord[4] = IOERR_ABORT_REQUESTED; 7234 (abort_iocb->iocb_cmpl)(phba, abort_iocb, abort_iocb); 7235 } else 7236 spin_unlock_irq(&phba->hbalock); 7237 } 7238 7239 lpfc_sli_release_iocbq(phba, cmdiocb); 7240 return; 7241 } 7242 7243 /** 7244 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command 7245 * @phba: Pointer to HBA context object. 7246 * @cmdiocb: Pointer to driver command iocb object. 7247 * @rspiocb: Pointer to driver response iocb object. 7248 * 7249 * The function is called from SLI ring event handler with no 7250 * lock held. This function is the completion handler for ELS commands 7251 * which are aborted. The function frees memory resources used for 7252 * the aborted ELS commands. 7253 **/ 7254 static void 7255 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 7256 struct lpfc_iocbq *rspiocb) 7257 { 7258 IOCB_t *irsp = &rspiocb->iocb; 7259 7260 /* ELS cmd tag <ulpIoTag> completes */ 7261 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 7262 "0139 Ignoring ELS cmd tag x%x completion Data: " 7263 "x%x x%x x%x\n", 7264 irsp->ulpIoTag, irsp->ulpStatus, 7265 irsp->un.ulpWord[4], irsp->ulpTimeout); 7266 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) 7267 lpfc_ct_free_iocb(phba, cmdiocb); 7268 else 7269 lpfc_els_free_iocb(phba, cmdiocb); 7270 return; 7271 } 7272 7273 /** 7274 * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb 7275 * @phba: Pointer to HBA context object. 7276 * @pring: Pointer to driver SLI ring object. 7277 * @cmdiocb: Pointer to driver command iocb object. 7278 * 7279 * This function issues an abort iocb for the provided command iocb down to 7280 * the port. Other than the case the outstanding command iocb is an abort 7281 * request, this function issues abort out unconditionally. This function is 7282 * called with hbalock held. The function returns 0 when it fails due to 7283 * memory allocation failure or when the command iocb is an abort request. 7284 **/ 7285 static int 7286 lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 7287 struct lpfc_iocbq *cmdiocb) 7288 { 7289 struct lpfc_vport *vport = cmdiocb->vport; 7290 struct lpfc_iocbq *abtsiocbp; 7291 IOCB_t *icmd = NULL; 7292 IOCB_t *iabt = NULL; 7293 int retval; 7294 7295 /* 7296 * There are certain command types we don't want to abort. And we 7297 * don't want to abort commands that are already in the process of 7298 * being aborted. 
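	 * Concretely, the check below skips CMD_ABORT_XRI_CN and
	 * CMD_CLOSE_XRI_CN iocbs and anything already flagged
	 * LPFC_DRIVER_ABORTED.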
7299 */ 7300 icmd = &cmdiocb->iocb; 7301 if (icmd->ulpCommand == CMD_ABORT_XRI_CN || 7302 icmd->ulpCommand == CMD_CLOSE_XRI_CN || 7303 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0) 7304 return 0; 7305 7306 /* issue ABTS for this IOCB based on iotag */ 7307 abtsiocbp = __lpfc_sli_get_iocbq(phba); 7308 if (abtsiocbp == NULL) 7309 return 0; 7310 7311 /* This signals the response to set the correct status 7312 * before calling the completion handler 7313 */ 7314 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED; 7315 7316 iabt = &abtsiocbp->iocb; 7317 iabt->un.acxri.abortType = ABORT_TYPE_ABTS; 7318 iabt->un.acxri.abortContextTag = icmd->ulpContext; 7319 if (phba->sli_rev == LPFC_SLI_REV4) { 7320 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag; 7321 iabt->un.acxri.abortContextTag = cmdiocb->iotag; 7322 } 7323 else 7324 iabt->un.acxri.abortIoTag = icmd->ulpIoTag; 7325 iabt->ulpLe = 1; 7326 iabt->ulpClass = icmd->ulpClass; 7327 7328 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 7329 abtsiocbp->fcp_wqidx = cmdiocb->fcp_wqidx; 7330 if (cmdiocb->iocb_flag & LPFC_IO_FCP) 7331 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX; 7332 7333 if (phba->link_state >= LPFC_LINK_UP) 7334 iabt->ulpCommand = CMD_ABORT_XRI_CN; 7335 else 7336 iabt->ulpCommand = CMD_CLOSE_XRI_CN; 7337 7338 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl; 7339 7340 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, 7341 "0339 Abort xri x%x, original iotag x%x, " 7342 "abort cmd iotag x%x\n", 7343 iabt->un.acxri.abortIoTag, 7344 iabt->un.acxri.abortContextTag, 7345 abtsiocbp->iotag); 7346 retval = __lpfc_sli_issue_iocb(phba, pring->ringno, abtsiocbp, 0); 7347 7348 if (retval) 7349 __lpfc_sli_release_iocbq(phba, abtsiocbp); 7350 7351 /* 7352 * Caller to this routine should check for IOCB_ERROR 7353 * and handle it properly. This routine no longer removes 7354 * iocb off txcmplq and call compl in case of IOCB_ERROR. 7355 */ 7356 return retval; 7357 } 7358 7359 /** 7360 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb 7361 * @phba: Pointer to HBA context object. 7362 * @pring: Pointer to driver SLI ring object. 7363 * @cmdiocb: Pointer to driver command iocb object. 7364 * 7365 * This function issues an abort iocb for the provided command iocb. In case 7366 * of unloading, the abort iocb will not be issued to commands on the ELS 7367 * ring. Instead, the callback function shall be changed to those commands 7368 * so that nothing happens when them finishes. This function is called with 7369 * hbalock held. The function returns 0 when the command iocb is an abort 7370 * request. 7371 **/ 7372 int 7373 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 7374 struct lpfc_iocbq *cmdiocb) 7375 { 7376 struct lpfc_vport *vport = cmdiocb->vport; 7377 int retval = IOCB_ERROR; 7378 IOCB_t *icmd = NULL; 7379 7380 /* 7381 * There are certain command types we don't want to abort. And we 7382 * don't want to abort commands that are already in the process of 7383 * being aborted. 7384 */ 7385 icmd = &cmdiocb->iocb; 7386 if (icmd->ulpCommand == CMD_ABORT_XRI_CN || 7387 icmd->ulpCommand == CMD_CLOSE_XRI_CN || 7388 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0) 7389 return 0; 7390 7391 /* 7392 * If we're unloading, don't abort iocb on the ELS ring, but change 7393 * the callback so that nothing happens when it finishes. 
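	 * "Nothing happens" means the completion is redirected to
	 * lpfc_ignore_els_cmpl(), which only logs the event and frees the
	 * iocb (lpfc_ct_free_iocb or lpfc_els_free_iocb) instead of
	 * driving any further ELS handling.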
7394 */ 7395 if ((vport->load_flag & FC_UNLOADING) && 7396 (pring->ringno == LPFC_ELS_RING)) { 7397 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) 7398 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl; 7399 else 7400 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl; 7401 goto abort_iotag_exit; 7402 } 7403 7404 /* Now, we try to issue the abort to the cmdiocb out */ 7405 retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb); 7406 7407 abort_iotag_exit: 7408 /* 7409 * Caller to this routine should check for IOCB_ERROR 7410 * and handle it properly. This routine no longer removes 7411 * iocb off txcmplq and call compl in case of IOCB_ERROR. 7412 */ 7413 return retval; 7414 } 7415 7416 /** 7417 * lpfc_sli_iocb_ring_abort - Unconditionally abort all iocbs on an iocb ring 7418 * @phba: Pointer to HBA context object. 7419 * @pring: Pointer to driver SLI ring object. 7420 * 7421 * This function aborts all iocbs in the given ring and frees all the iocb 7422 * objects in txq. This function issues abort iocbs unconditionally for all 7423 * the iocb commands in txcmplq. The iocbs in the txcmplq is not guaranteed 7424 * to complete before the return of this function. The caller is not required 7425 * to hold any locks. 7426 **/ 7427 static void 7428 lpfc_sli_iocb_ring_abort(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 7429 { 7430 LIST_HEAD(completions); 7431 struct lpfc_iocbq *iocb, *next_iocb; 7432 7433 if (pring->ringno == LPFC_ELS_RING) 7434 lpfc_fabric_abort_hba(phba); 7435 7436 spin_lock_irq(&phba->hbalock); 7437 7438 /* Take off all the iocbs on txq for cancelling */ 7439 list_splice_init(&pring->txq, &completions); 7440 pring->txq_cnt = 0; 7441 7442 /* Next issue ABTS for everything on the txcmplq */ 7443 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) 7444 lpfc_sli_abort_iotag_issue(phba, pring, iocb); 7445 7446 spin_unlock_irq(&phba->hbalock); 7447 7448 /* Cancel all the IOCBs from the completions list */ 7449 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 7450 IOERR_SLI_ABORTED); 7451 } 7452 7453 /** 7454 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba. 7455 * @phba: pointer to lpfc HBA data structure. 7456 * 7457 * This routine will abort all pending and outstanding iocbs to an HBA. 7458 **/ 7459 void 7460 lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba) 7461 { 7462 struct lpfc_sli *psli = &phba->sli; 7463 struct lpfc_sli_ring *pring; 7464 int i; 7465 7466 for (i = 0; i < psli->num_rings; i++) { 7467 pring = &psli->ring[i]; 7468 lpfc_sli_iocb_ring_abort(phba, pring); 7469 } 7470 } 7471 7472 /** 7473 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN 7474 * @iocbq: Pointer to driver iocb object. 7475 * @vport: Pointer to driver virtual port object. 7476 * @tgt_id: SCSI ID of the target. 7477 * @lun_id: LUN ID of the scsi device. 7478 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST 7479 * 7480 * This function acts as an iocb filter for functions which abort or count 7481 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return 7482 * 0 if the filtering criteria is met for the given iocb and will return 7483 * 1 if the filtering criteria is not met. 7484 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the 7485 * given iocb is for the SCSI device specified by vport, tgt_id and 7486 * lun_id parameter. 7487 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the 7488 * given iocb is for the SCSI target specified by vport and tgt_id 7489 * parameters. 
7490 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the 7491 * given iocb is for the SCSI host associated with the given vport. 7492 * This function is called with no locks held. 7493 **/ 7494 static int 7495 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport, 7496 uint16_t tgt_id, uint64_t lun_id, 7497 lpfc_ctx_cmd ctx_cmd) 7498 { 7499 struct lpfc_scsi_buf *lpfc_cmd; 7500 int rc = 1; 7501 7502 if (!(iocbq->iocb_flag & LPFC_IO_FCP)) 7503 return rc; 7504 7505 if (iocbq->vport != vport) 7506 return rc; 7507 7508 lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq); 7509 7510 if (lpfc_cmd->pCmd == NULL) 7511 return rc; 7512 7513 switch (ctx_cmd) { 7514 case LPFC_CTX_LUN: 7515 if ((lpfc_cmd->rdata->pnode) && 7516 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) && 7517 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id)) 7518 rc = 0; 7519 break; 7520 case LPFC_CTX_TGT: 7521 if ((lpfc_cmd->rdata->pnode) && 7522 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id)) 7523 rc = 0; 7524 break; 7525 case LPFC_CTX_HOST: 7526 rc = 0; 7527 break; 7528 default: 7529 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n", 7530 __func__, ctx_cmd); 7531 break; 7532 } 7533 7534 return rc; 7535 } 7536 7537 /** 7538 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending 7539 * @vport: Pointer to virtual port. 7540 * @tgt_id: SCSI ID of the target. 7541 * @lun_id: LUN ID of the scsi device. 7542 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. 7543 * 7544 * This function returns number of FCP commands pending for the vport. 7545 * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP 7546 * commands pending on the vport associated with SCSI device specified 7547 * by tgt_id and lun_id parameters. 7548 * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP 7549 * commands pending on the vport associated with SCSI target specified 7550 * by tgt_id parameter. 7551 * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP 7552 * commands pending on the vport. 7553 * This function returns the number of iocbs which satisfy the filter. 7554 * This function is called without any lock held. 7555 **/ 7556 int 7557 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id, 7558 lpfc_ctx_cmd ctx_cmd) 7559 { 7560 struct lpfc_hba *phba = vport->phba; 7561 struct lpfc_iocbq *iocbq; 7562 int sum, i; 7563 7564 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) { 7565 iocbq = phba->sli.iocbq_lookup[i]; 7566 7567 if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id, 7568 ctx_cmd) == 0) 7569 sum++; 7570 } 7571 7572 return sum; 7573 } 7574 7575 /** 7576 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs 7577 * @phba: Pointer to HBA context object 7578 * @cmdiocb: Pointer to command iocb object. 7579 * @rspiocb: Pointer to response iocb object. 7580 * 7581 * This function is called when an aborted FCP iocb completes. This 7582 * function is called by the ring event handler with no lock held. 7583 * This function frees the iocb. 7584 **/ 7585 void 7586 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 7587 struct lpfc_iocbq *rspiocb) 7588 { 7589 lpfc_sli_release_iocbq(phba, cmdiocb); 7590 return; 7591 } 7592 7593 /** 7594 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN 7595 * @vport: Pointer to virtual port. 7596 * @pring: Pointer to driver SLI ring object. 7597 * @tgt_id: SCSI ID of the target. 
7598 * @lun_id: LUN ID of the scsi device. 7599 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. 7600 * 7601 * This function sends an abort command for every SCSI command 7602 * associated with the given virtual port pending on the ring 7603 * filtered by lpfc_sli_validate_fcp_iocb function. 7604 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the 7605 * FCP iocbs associated with lun specified by tgt_id and lun_id 7606 * parameters 7607 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the 7608 * FCP iocbs associated with SCSI target specified by tgt_id parameter. 7609 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all 7610 * FCP iocbs associated with virtual port. 7611 * This function returns number of iocbs it failed to abort. 7612 * This function is called with no locks held. 7613 **/ 7614 int 7615 lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, 7616 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd) 7617 { 7618 struct lpfc_hba *phba = vport->phba; 7619 struct lpfc_iocbq *iocbq; 7620 struct lpfc_iocbq *abtsiocb; 7621 IOCB_t *cmd = NULL; 7622 int errcnt = 0, ret_val = 0; 7623 int i; 7624 7625 for (i = 1; i <= phba->sli.last_iotag; i++) { 7626 iocbq = phba->sli.iocbq_lookup[i]; 7627 7628 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id, 7629 abort_cmd) != 0) 7630 continue; 7631 7632 /* issue ABTS for this IOCB based on iotag */ 7633 abtsiocb = lpfc_sli_get_iocbq(phba); 7634 if (abtsiocb == NULL) { 7635 errcnt++; 7636 continue; 7637 } 7638 7639 cmd = &iocbq->iocb; 7640 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; 7641 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext; 7642 if (phba->sli_rev == LPFC_SLI_REV4) 7643 abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag; 7644 else 7645 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag; 7646 abtsiocb->iocb.ulpLe = 1; 7647 abtsiocb->iocb.ulpClass = cmd->ulpClass; 7648 abtsiocb->vport = phba->pport; 7649 7650 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 7651 abtsiocb->fcp_wqidx = iocbq->fcp_wqidx; 7652 if (iocbq->iocb_flag & LPFC_IO_FCP) 7653 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX; 7654 7655 if (lpfc_is_link_up(phba)) 7656 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN; 7657 else 7658 abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN; 7659 7660 /* Setup callback routine and issue the command. */ 7661 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 7662 ret_val = lpfc_sli_issue_iocb(phba, pring->ringno, 7663 abtsiocb, 0); 7664 if (ret_val == IOCB_ERROR) { 7665 lpfc_sli_release_iocbq(phba, abtsiocb); 7666 errcnt++; 7667 continue; 7668 } 7669 } 7670 7671 return errcnt; 7672 } 7673 7674 /** 7675 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler 7676 * @phba: Pointer to HBA context object. 7677 * @cmdiocbq: Pointer to command iocb. 7678 * @rspiocbq: Pointer to response iocb. 7679 * 7680 * This function is the completion handler for iocbs issued using 7681 * lpfc_sli_issue_iocb_wait function. This function is called by the 7682 * ring event handler function without any lock held. This function 7683 * can be called from both worker thread context and interrupt 7684 * context. This function also can be called from other thread which 7685 * cleans up the SLI layer objects. 
7686 * This function copy the contents of the response iocb to the 7687 * response iocb memory object provided by the caller of 7688 * lpfc_sli_issue_iocb_wait and then wakes up the thread which 7689 * sleeps for the iocb completion. 7690 **/ 7691 static void 7692 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, 7693 struct lpfc_iocbq *cmdiocbq, 7694 struct lpfc_iocbq *rspiocbq) 7695 { 7696 wait_queue_head_t *pdone_q; 7697 unsigned long iflags; 7698 struct lpfc_scsi_buf *lpfc_cmd; 7699 7700 spin_lock_irqsave(&phba->hbalock, iflags); 7701 cmdiocbq->iocb_flag |= LPFC_IO_WAKE; 7702 if (cmdiocbq->context2 && rspiocbq) 7703 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, 7704 &rspiocbq->iocb, sizeof(IOCB_t)); 7705 7706 /* Set the exchange busy flag for task management commands */ 7707 if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) && 7708 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) { 7709 lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf, 7710 cur_iocbq); 7711 lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY; 7712 } 7713 7714 pdone_q = cmdiocbq->context_un.wait_queue; 7715 if (pdone_q) 7716 wake_up(pdone_q); 7717 spin_unlock_irqrestore(&phba->hbalock, iflags); 7718 return; 7719 } 7720 7721 /** 7722 * lpfc_chk_iocb_flg - Test IOCB flag with lock held. 7723 * @phba: Pointer to HBA context object.. 7724 * @piocbq: Pointer to command iocb. 7725 * @flag: Flag to test. 7726 * 7727 * This routine grabs the hbalock and then test the iocb_flag to 7728 * see if the passed in flag is set. 7729 * Returns: 7730 * 1 if flag is set. 7731 * 0 if flag is not set. 7732 **/ 7733 static int 7734 lpfc_chk_iocb_flg(struct lpfc_hba *phba, 7735 struct lpfc_iocbq *piocbq, uint32_t flag) 7736 { 7737 unsigned long iflags; 7738 int ret; 7739 7740 spin_lock_irqsave(&phba->hbalock, iflags); 7741 ret = piocbq->iocb_flag & flag; 7742 spin_unlock_irqrestore(&phba->hbalock, iflags); 7743 return ret; 7744 7745 } 7746 7747 /** 7748 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands 7749 * @phba: Pointer to HBA context object.. 7750 * @pring: Pointer to sli ring. 7751 * @piocb: Pointer to command iocb. 7752 * @prspiocbq: Pointer to response iocb. 7753 * @timeout: Timeout in number of seconds. 7754 * 7755 * This function issues the iocb to firmware and waits for the 7756 * iocb to complete. If the iocb command is not 7757 * completed within timeout seconds, it returns IOCB_TIMEDOUT. 7758 * Caller should not free the iocb resources if this function 7759 * returns IOCB_TIMEDOUT. 7760 * The function waits for the iocb completion using an 7761 * non-interruptible wait. 7762 * This function will sleep while waiting for iocb completion. 7763 * So, this function should not be called from any context which 7764 * does not allow sleeping. Due to the same reason, this function 7765 * cannot be called with interrupt disabled. 7766 * This function assumes that the iocb completions occur while 7767 * this function sleep. So, this function cannot be called from 7768 * the thread which process iocb completion for this ring. 7769 * This function clears the iocb_flag of the iocb object before 7770 * issuing the iocb and the iocb completion handler sets this 7771 * flag and wakes this thread when the iocb completes. 7772 * The contents of the response iocb will be copied to prspiocbq 7773 * by the completion handler when the command completes. 7774 * This function returns IOCB_SUCCESS when success. 7775 * This function is called with no lock held. 
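 *
 * Illustrative caller sketch (assumed context; "piocb" is a prepared
 * command iocb and the 30 second timeout is only an example value):
 *
 *	struct lpfc_iocbq *rsp = lpfc_sli_get_iocbq(phba);
 *	int rc;
 *
 *	rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, piocb, rsp, 30);
 *	if (rc == IOCB_SUCCESS)
 *		// rsp->iocb now holds the response contents
 *	else if (rc == IOCB_TIMEDOUT)
 *		// do not free piocb; its completion may still arrive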
7776 **/ 7777 int 7778 lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, 7779 uint32_t ring_number, 7780 struct lpfc_iocbq *piocb, 7781 struct lpfc_iocbq *prspiocbq, 7782 uint32_t timeout) 7783 { 7784 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q); 7785 long timeleft, timeout_req = 0; 7786 int retval = IOCB_SUCCESS; 7787 uint32_t creg_val; 7788 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 7789 /* 7790 * If the caller has provided a response iocbq buffer, then context2 7791 * is NULL or its an error. 7792 */ 7793 if (prspiocbq) { 7794 if (piocb->context2) 7795 return IOCB_ERROR; 7796 piocb->context2 = prspiocbq; 7797 } 7798 7799 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait; 7800 piocb->context_un.wait_queue = &done_q; 7801 piocb->iocb_flag &= ~LPFC_IO_WAKE; 7802 7803 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 7804 creg_val = readl(phba->HCregaddr); 7805 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); 7806 writel(creg_val, phba->HCregaddr); 7807 readl(phba->HCregaddr); /* flush */ 7808 } 7809 7810 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb, 7811 SLI_IOCB_RET_IOCB); 7812 if (retval == IOCB_SUCCESS) { 7813 timeout_req = timeout * HZ; 7814 timeleft = wait_event_timeout(done_q, 7815 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE), 7816 timeout_req); 7817 7818 if (piocb->iocb_flag & LPFC_IO_WAKE) { 7819 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 7820 "0331 IOCB wake signaled\n"); 7821 } else if (timeleft == 0) { 7822 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 7823 "0338 IOCB wait timeout error - no " 7824 "wake response Data x%x\n", timeout); 7825 retval = IOCB_TIMEDOUT; 7826 } else { 7827 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 7828 "0330 IOCB wake NOT set, " 7829 "Data x%x x%lx\n", 7830 timeout, (timeleft / jiffies)); 7831 retval = IOCB_TIMEDOUT; 7832 } 7833 } else if (retval == IOCB_BUSY) { 7834 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 7835 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n", 7836 phba->iocb_cnt, pring->txq_cnt, pring->txcmplq_cnt); 7837 return retval; 7838 } else { 7839 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 7840 "0332 IOCB wait issue failed, Data x%x\n", 7841 retval); 7842 retval = IOCB_ERROR; 7843 } 7844 7845 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 7846 creg_val = readl(phba->HCregaddr); 7847 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING); 7848 writel(creg_val, phba->HCregaddr); 7849 readl(phba->HCregaddr); /* flush */ 7850 } 7851 7852 if (prspiocbq) 7853 piocb->context2 = NULL; 7854 7855 piocb->context_un.wait_queue = NULL; 7856 piocb->iocb_cmpl = NULL; 7857 return retval; 7858 } 7859 7860 /** 7861 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox 7862 * @phba: Pointer to HBA context object. 7863 * @pmboxq: Pointer to driver mailbox object. 7864 * @timeout: Timeout in number of seconds. 7865 * 7866 * This function issues the mailbox to firmware and waits for the 7867 * mailbox command to complete. If the mailbox command is not 7868 * completed within timeout seconds, it returns MBX_TIMEOUT. 7869 * The function waits for the mailbox completion using an 7870 * interruptible wait. If the thread is woken up due to a 7871 * signal, MBX_TIMEOUT error is returned to the caller. Caller 7872 * should not free the mailbox resources, if this function returns 7873 * MBX_TIMEOUT. 7874 * This function will sleep while waiting for mailbox completion. 7875 * So, this function should not be called from any context which 7876 * does not allow sleeping. Due to the same reason, this function 7877 * cannot be called with interrupt disabled. 
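 *
 * Illustrative caller sketch (assumed context; the mailbox command is
 * built by one of the driver's mbox setup helpers, which is not shown):
 *
 *	LPFC_MBOXQ_t *mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	int rc;
 *
 *	// ... build the command in mboxq (setup helpers clear it first) ...
 *	rc = lpfc_sli_issue_mbox_wait(phba, mboxq, 30);	// seconds
 *	if (rc == MBX_SUCCESS)
 *		// mboxq->u.mb holds the completed mailbox
 *	if (rc != MBX_TIMEOUT)
 *		mempool_free(mboxq, phba->mbox_mem_pool);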
7878 * This function assumes that the mailbox completion occurs while 7879 * this function sleep. So, this function cannot be called from 7880 * the worker thread which processes mailbox completion. 7881 * This function is called in the context of HBA management 7882 * applications. 7883 * This function returns MBX_SUCCESS when successful. 7884 * This function is called with no lock held. 7885 **/ 7886 int 7887 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq, 7888 uint32_t timeout) 7889 { 7890 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q); 7891 int retval; 7892 unsigned long flag; 7893 7894 /* The caller must leave context1 empty. */ 7895 if (pmboxq->context1) 7896 return MBX_NOT_FINISHED; 7897 7898 pmboxq->mbox_flag &= ~LPFC_MBX_WAKE; 7899 /* setup wake call as IOCB callback */ 7900 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait; 7901 /* setup context field to pass wait_queue pointer to wake function */ 7902 pmboxq->context1 = &done_q; 7903 7904 /* now issue the command */ 7905 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); 7906 7907 if (retval == MBX_BUSY || retval == MBX_SUCCESS) { 7908 wait_event_interruptible_timeout(done_q, 7909 pmboxq->mbox_flag & LPFC_MBX_WAKE, 7910 timeout * HZ); 7911 7912 spin_lock_irqsave(&phba->hbalock, flag); 7913 pmboxq->context1 = NULL; 7914 /* 7915 * if LPFC_MBX_WAKE flag is set the mailbox is completed 7916 * else do not free the resources. 7917 */ 7918 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) { 7919 retval = MBX_SUCCESS; 7920 lpfc_sli4_swap_str(phba, pmboxq); 7921 } else { 7922 retval = MBX_TIMEOUT; 7923 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 7924 } 7925 spin_unlock_irqrestore(&phba->hbalock, flag); 7926 } 7927 7928 return retval; 7929 } 7930 7931 /** 7932 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system 7933 * @phba: Pointer to HBA context. 7934 * 7935 * This function is called to shutdown the driver's mailbox sub-system. 7936 * It first marks the mailbox sub-system is in a block state to prevent 7937 * the asynchronous mailbox command from issued off the pending mailbox 7938 * command queue. If the mailbox command sub-system shutdown is due to 7939 * HBA error conditions such as EEH or ERATT, this routine shall invoke 7940 * the mailbox sub-system flush routine to forcefully bring down the 7941 * mailbox sub-system. Otherwise, if it is due to normal condition (such 7942 * as with offline or HBA function reset), this routine will wait for the 7943 * outstanding mailbox command to complete before invoking the mailbox 7944 * sub-system flush routine to gracefully bring down mailbox sub-system. 7945 **/ 7946 void 7947 lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba) 7948 { 7949 struct lpfc_sli *psli = &phba->sli; 7950 uint8_t actcmd = MBX_HEARTBEAT; 7951 unsigned long timeout; 7952 7953 spin_lock_irq(&phba->hbalock); 7954 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 7955 spin_unlock_irq(&phba->hbalock); 7956 7957 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 7958 spin_lock_irq(&phba->hbalock); 7959 if (phba->sli.mbox_active) 7960 actcmd = phba->sli.mbox_active->u.mb.mbxCommand; 7961 spin_unlock_irq(&phba->hbalock); 7962 /* Determine how long we might wait for the active mailbox 7963 * command to be gracefully completed by firmware. 
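		 * lpfc_mbox_tmo_val() reports the timeout for the active
		 * command in seconds; it is scaled to milliseconds for
		 * msecs_to_jiffies() and anchored to the current jiffies,
		 * then the 2ms msleep() loop below polls until the command
		 * drains or that deadline passes.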
7964 */ 7965 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) * 7966 1000) + jiffies; 7967 while (phba->sli.mbox_active) { 7968 /* Check active mailbox complete status every 2ms */ 7969 msleep(2); 7970 if (time_after(jiffies, timeout)) 7971 /* Timeout, let the mailbox flush routine to 7972 * forcefully release active mailbox command 7973 */ 7974 break; 7975 } 7976 } 7977 lpfc_sli_mbox_sys_flush(phba); 7978 } 7979 7980 /** 7981 * lpfc_sli_eratt_read - read sli-3 error attention events 7982 * @phba: Pointer to HBA context. 7983 * 7984 * This function is called to read the SLI3 device error attention registers 7985 * for possible error attention events. The caller must hold the hostlock 7986 * with spin_lock_irq(). 7987 * 7988 * This fucntion returns 1 when there is Error Attention in the Host Attention 7989 * Register and returns 0 otherwise. 7990 **/ 7991 static int 7992 lpfc_sli_eratt_read(struct lpfc_hba *phba) 7993 { 7994 uint32_t ha_copy; 7995 7996 /* Read chip Host Attention (HA) register */ 7997 ha_copy = readl(phba->HAregaddr); 7998 if (ha_copy & HA_ERATT) { 7999 /* Read host status register to retrieve error event */ 8000 lpfc_sli_read_hs(phba); 8001 8002 /* Check if there is a deferred error condition is active */ 8003 if ((HS_FFER1 & phba->work_hs) && 8004 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | 8005 HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) { 8006 phba->hba_flag |= DEFER_ERATT; 8007 /* Clear all interrupt enable conditions */ 8008 writel(0, phba->HCregaddr); 8009 readl(phba->HCregaddr); 8010 } 8011 8012 /* Set the driver HA work bitmap */ 8013 phba->work_ha |= HA_ERATT; 8014 /* Indicate polling handles this ERATT */ 8015 phba->hba_flag |= HBA_ERATT_HANDLED; 8016 return 1; 8017 } 8018 return 0; 8019 } 8020 8021 /** 8022 * lpfc_sli4_eratt_read - read sli-4 error attention events 8023 * @phba: Pointer to HBA context. 8024 * 8025 * This function is called to read the SLI4 device error attention registers 8026 * for possible error attention events. The caller must hold the hostlock 8027 * with spin_lock_irq(). 8028 * 8029 * This fucntion returns 1 when there is Error Attention in the Host Attention 8030 * Register and returns 0 otherwise. 8031 **/ 8032 static int 8033 lpfc_sli4_eratt_read(struct lpfc_hba *phba) 8034 { 8035 uint32_t uerr_sta_hi, uerr_sta_lo; 8036 8037 /* For now, use the SLI4 device internal unrecoverable error 8038 * registers for error attention. This can be changed later. 8039 */ 8040 uerr_sta_lo = readl(phba->sli4_hba.UERRLOregaddr); 8041 uerr_sta_hi = readl(phba->sli4_hba.UERRHIregaddr); 8042 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) || 8043 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) { 8044 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8045 "1423 HBA Unrecoverable error: " 8046 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " 8047 "ue_mask_lo_reg=0x%x, ue_mask_hi_reg=0x%x\n", 8048 uerr_sta_lo, uerr_sta_hi, 8049 phba->sli4_hba.ue_mask_lo, 8050 phba->sli4_hba.ue_mask_hi); 8051 phba->work_status[0] = uerr_sta_lo; 8052 phba->work_status[1] = uerr_sta_hi; 8053 /* Set the driver HA work bitmap */ 8054 phba->work_ha |= HA_ERATT; 8055 /* Indicate polling handles this ERATT */ 8056 phba->hba_flag |= HBA_ERATT_HANDLED; 8057 return 1; 8058 } 8059 return 0; 8060 } 8061 8062 /** 8063 * lpfc_sli_check_eratt - check error attention events 8064 * @phba: Pointer to HBA context. 8065 * 8066 * This function is called from timer soft interrupt context to check HBA's 8067 * error attention register bit for error attention events. 
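 * The routine takes and releases hbalock itself, so it must be entered
 * with no locks held; any ERATT found here is marked HBA_ERATT_HANDLED
 * so the interrupt handler does not process the same event again.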
8068 * 8069 * This fucntion returns 1 when there is Error Attention in the Host Attention 8070 * Register and returns 0 otherwise. 8071 **/ 8072 int 8073 lpfc_sli_check_eratt(struct lpfc_hba *phba) 8074 { 8075 uint32_t ha_copy; 8076 8077 /* If somebody is waiting to handle an eratt, don't process it 8078 * here. The brdkill function will do this. 8079 */ 8080 if (phba->link_flag & LS_IGNORE_ERATT) 8081 return 0; 8082 8083 /* Check if interrupt handler handles this ERATT */ 8084 spin_lock_irq(&phba->hbalock); 8085 if (phba->hba_flag & HBA_ERATT_HANDLED) { 8086 /* Interrupt handler has handled ERATT */ 8087 spin_unlock_irq(&phba->hbalock); 8088 return 0; 8089 } 8090 8091 /* 8092 * If there is deferred error attention, do not check for error 8093 * attention 8094 */ 8095 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 8096 spin_unlock_irq(&phba->hbalock); 8097 return 0; 8098 } 8099 8100 /* If PCI channel is offline, don't process it */ 8101 if (unlikely(pci_channel_offline(phba->pcidev))) { 8102 spin_unlock_irq(&phba->hbalock); 8103 return 0; 8104 } 8105 8106 switch (phba->sli_rev) { 8107 case LPFC_SLI_REV2: 8108 case LPFC_SLI_REV3: 8109 /* Read chip Host Attention (HA) register */ 8110 ha_copy = lpfc_sli_eratt_read(phba); 8111 break; 8112 case LPFC_SLI_REV4: 8113 /* Read devcie Uncoverable Error (UERR) registers */ 8114 ha_copy = lpfc_sli4_eratt_read(phba); 8115 break; 8116 default: 8117 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8118 "0299 Invalid SLI revision (%d)\n", 8119 phba->sli_rev); 8120 ha_copy = 0; 8121 break; 8122 } 8123 spin_unlock_irq(&phba->hbalock); 8124 8125 return ha_copy; 8126 } 8127 8128 /** 8129 * lpfc_intr_state_check - Check device state for interrupt handling 8130 * @phba: Pointer to HBA context. 8131 * 8132 * This inline routine checks whether a device or its PCI slot is in a state 8133 * that the interrupt should be handled. 8134 * 8135 * This function returns 0 if the device or the PCI slot is in a state that 8136 * interrupt should be handled, otherwise -EIO. 8137 */ 8138 static inline int 8139 lpfc_intr_state_check(struct lpfc_hba *phba) 8140 { 8141 /* If the pci channel is offline, ignore all the interrupts */ 8142 if (unlikely(pci_channel_offline(phba->pcidev))) 8143 return -EIO; 8144 8145 /* Update device level interrupt statistics */ 8146 phba->sli.slistat.sli_intr++; 8147 8148 /* Ignore all interrupts during initialization. */ 8149 if (unlikely(phba->link_state < LPFC_LINK_DOWN)) 8150 return -EIO; 8151 8152 return 0; 8153 } 8154 8155 /** 8156 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device 8157 * @irq: Interrupt number. 8158 * @dev_id: The device context pointer. 8159 * 8160 * This function is directly called from the PCI layer as an interrupt 8161 * service routine when device with SLI-3 interface spec is enabled with 8162 * MSI-X multi-message interrupt mode and there are slow-path events in 8163 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ 8164 * interrupt mode, this function is called as part of the device-level 8165 * interrupt handler. When the PCI slot is in error recovery or the HBA 8166 * is undergoing initialization, the interrupt handler will not process 8167 * the interrupt. The link attention and ELS ring attention events are 8168 * handled by the worker thread. The interrupt handler signals the worker 8169 * thread and returns for these events. This function is called without 8170 * any lock held. It gets the hbalock to access and update SLI data 8171 * structures. 
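 *
 * In MSI-X mode this handler is attached directly to the slow-path
 * vector; in MSI or Pin-IRQ mode it is reached through
 * lpfc_sli_intr_handler(), which reads the Host Attention register once
 * and then fans out to the slow-path and fast-path handlers as needed.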
8172 * 8173 * This function returns IRQ_HANDLED when interrupt is handled else it 8174 * returns IRQ_NONE. 8175 **/ 8176 irqreturn_t 8177 lpfc_sli_sp_intr_handler(int irq, void *dev_id) 8178 { 8179 struct lpfc_hba *phba; 8180 uint32_t ha_copy, hc_copy; 8181 uint32_t work_ha_copy; 8182 unsigned long status; 8183 unsigned long iflag; 8184 uint32_t control; 8185 8186 MAILBOX_t *mbox, *pmbox; 8187 struct lpfc_vport *vport; 8188 struct lpfc_nodelist *ndlp; 8189 struct lpfc_dmabuf *mp; 8190 LPFC_MBOXQ_t *pmb; 8191 int rc; 8192 8193 /* 8194 * Get the driver's phba structure from the dev_id and 8195 * assume the HBA is not interrupting. 8196 */ 8197 phba = (struct lpfc_hba *)dev_id; 8198 8199 if (unlikely(!phba)) 8200 return IRQ_NONE; 8201 8202 /* 8203 * Stuff needs to be attented to when this function is invoked as an 8204 * individual interrupt handler in MSI-X multi-message interrupt mode 8205 */ 8206 if (phba->intr_type == MSIX) { 8207 /* Check device state for handling interrupt */ 8208 if (lpfc_intr_state_check(phba)) 8209 return IRQ_NONE; 8210 /* Need to read HA REG for slow-path events */ 8211 spin_lock_irqsave(&phba->hbalock, iflag); 8212 ha_copy = readl(phba->HAregaddr); 8213 /* If somebody is waiting to handle an eratt don't process it 8214 * here. The brdkill function will do this. 8215 */ 8216 if (phba->link_flag & LS_IGNORE_ERATT) 8217 ha_copy &= ~HA_ERATT; 8218 /* Check the need for handling ERATT in interrupt handler */ 8219 if (ha_copy & HA_ERATT) { 8220 if (phba->hba_flag & HBA_ERATT_HANDLED) 8221 /* ERATT polling has handled ERATT */ 8222 ha_copy &= ~HA_ERATT; 8223 else 8224 /* Indicate interrupt handler handles ERATT */ 8225 phba->hba_flag |= HBA_ERATT_HANDLED; 8226 } 8227 8228 /* 8229 * If there is deferred error attention, do not check for any 8230 * interrupt. 8231 */ 8232 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 8233 spin_unlock_irqrestore(&phba->hbalock, iflag); 8234 return IRQ_NONE; 8235 } 8236 8237 /* Clear up only attention source related to slow-path */ 8238 hc_copy = readl(phba->HCregaddr); 8239 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA | 8240 HC_LAINT_ENA | HC_ERINT_ENA), 8241 phba->HCregaddr); 8242 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)), 8243 phba->HAregaddr); 8244 writel(hc_copy, phba->HCregaddr); 8245 readl(phba->HAregaddr); /* flush */ 8246 spin_unlock_irqrestore(&phba->hbalock, iflag); 8247 } else 8248 ha_copy = phba->ha_copy; 8249 8250 work_ha_copy = ha_copy & phba->work_ha_mask; 8251 8252 if (work_ha_copy) { 8253 if (work_ha_copy & HA_LATT) { 8254 if (phba->sli.sli_flag & LPFC_PROCESS_LA) { 8255 /* 8256 * Turn off Link Attention interrupts 8257 * until CLEAR_LA done 8258 */ 8259 spin_lock_irqsave(&phba->hbalock, iflag); 8260 phba->sli.sli_flag &= ~LPFC_PROCESS_LA; 8261 control = readl(phba->HCregaddr); 8262 control &= ~HC_LAINT_ENA; 8263 writel(control, phba->HCregaddr); 8264 readl(phba->HCregaddr); /* flush */ 8265 spin_unlock_irqrestore(&phba->hbalock, iflag); 8266 } 8267 else 8268 work_ha_copy &= ~HA_LATT; 8269 } 8270 8271 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) { 8272 /* 8273 * Turn off Slow Rings interrupts, LPFC_ELS_RING is 8274 * the only slow ring. 
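			 * Each ring owns a 4-bit nibble of the Host
			 * Attention register, so the ELS ring's bits are
			 * isolated by masking with HA_RXMASK shifted up by
			 * 4 * LPFC_ELS_RING and shifting the result back
			 * down before testing it.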
8275 */ 8276 status = (work_ha_copy & 8277 (HA_RXMASK << (4*LPFC_ELS_RING))); 8278 status >>= (4*LPFC_ELS_RING); 8279 if (status & HA_RXMASK) { 8280 spin_lock_irqsave(&phba->hbalock, iflag); 8281 control = readl(phba->HCregaddr); 8282 8283 lpfc_debugfs_slow_ring_trc(phba, 8284 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x", 8285 control, status, 8286 (uint32_t)phba->sli.slistat.sli_intr); 8287 8288 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) { 8289 lpfc_debugfs_slow_ring_trc(phba, 8290 "ISR Disable ring:" 8291 "pwork:x%x hawork:x%x wait:x%x", 8292 phba->work_ha, work_ha_copy, 8293 (uint32_t)((unsigned long) 8294 &phba->work_waitq)); 8295 8296 control &= 8297 ~(HC_R0INT_ENA << LPFC_ELS_RING); 8298 writel(control, phba->HCregaddr); 8299 readl(phba->HCregaddr); /* flush */ 8300 } 8301 else { 8302 lpfc_debugfs_slow_ring_trc(phba, 8303 "ISR slow ring: pwork:" 8304 "x%x hawork:x%x wait:x%x", 8305 phba->work_ha, work_ha_copy, 8306 (uint32_t)((unsigned long) 8307 &phba->work_waitq)); 8308 } 8309 spin_unlock_irqrestore(&phba->hbalock, iflag); 8310 } 8311 } 8312 spin_lock_irqsave(&phba->hbalock, iflag); 8313 if (work_ha_copy & HA_ERATT) { 8314 lpfc_sli_read_hs(phba); 8315 /* 8316 * Check if there is a deferred error condition 8317 * is active 8318 */ 8319 if ((HS_FFER1 & phba->work_hs) && 8320 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | 8321 HS_FFER6 | HS_FFER7 | HS_FFER8) & 8322 phba->work_hs)) { 8323 phba->hba_flag |= DEFER_ERATT; 8324 /* Clear all interrupt enable conditions */ 8325 writel(0, phba->HCregaddr); 8326 readl(phba->HCregaddr); 8327 } 8328 } 8329 8330 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) { 8331 pmb = phba->sli.mbox_active; 8332 pmbox = &pmb->u.mb; 8333 mbox = phba->mbox; 8334 vport = pmb->vport; 8335 8336 /* First check out the status word */ 8337 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t)); 8338 if (pmbox->mbxOwner != OWN_HOST) { 8339 spin_unlock_irqrestore(&phba->hbalock, iflag); 8340 /* 8341 * Stray Mailbox Interrupt, mbxCommand <cmd> 8342 * mbxStatus <status> 8343 */ 8344 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 8345 LOG_SLI, 8346 "(%d):0304 Stray Mailbox " 8347 "Interrupt mbxCommand x%x " 8348 "mbxStatus x%x\n", 8349 (vport ? vport->vpi : 0), 8350 pmbox->mbxCommand, 8351 pmbox->mbxStatus); 8352 /* clear mailbox attention bit */ 8353 work_ha_copy &= ~HA_MBATT; 8354 } else { 8355 phba->sli.mbox_active = NULL; 8356 spin_unlock_irqrestore(&phba->hbalock, iflag); 8357 phba->last_completion_time = jiffies; 8358 del_timer(&phba->sli.mbox_tmo); 8359 if (pmb->mbox_cmpl) { 8360 lpfc_sli_pcimem_bcopy(mbox, pmbox, 8361 MAILBOX_CMD_SIZE); 8362 if (pmb->out_ext_byte_len && 8363 pmb->context2) 8364 lpfc_sli_pcimem_bcopy( 8365 phba->mbox_ext, 8366 pmb->context2, 8367 pmb->out_ext_byte_len); 8368 } 8369 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) { 8370 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG; 8371 8372 lpfc_debugfs_disc_trc(vport, 8373 LPFC_DISC_TRC_MBOX_VPORT, 8374 "MBOX dflt rpi: : " 8375 "status:x%x rpi:x%x", 8376 (uint32_t)pmbox->mbxStatus, 8377 pmbox->un.varWords[0], 0); 8378 8379 if (!pmbox->mbxStatus) { 8380 mp = (struct lpfc_dmabuf *) 8381 (pmb->context1); 8382 ndlp = (struct lpfc_nodelist *) 8383 pmb->context2; 8384 8385 /* Reg_LOGIN of dflt RPI was 8386 * successful. new lets get 8387 * rid of the RPI using the 8388 * same mbox buffer. 
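						 * The saved dma buffer (mp)
						 * and node (ndlp) are
						 * re-attached to the reused
						 * mailbox so that
						 * lpfc_mbx_cmpl_dflt_rpi()
						 * can release them once the
						 * UNREG_LOGIN completes.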
8389 */ 8390 lpfc_unreg_login(phba, 8391 vport->vpi, 8392 pmbox->un.varWords[0], 8393 pmb); 8394 pmb->mbox_cmpl = 8395 lpfc_mbx_cmpl_dflt_rpi; 8396 pmb->context1 = mp; 8397 pmb->context2 = ndlp; 8398 pmb->vport = vport; 8399 rc = lpfc_sli_issue_mbox(phba, 8400 pmb, 8401 MBX_NOWAIT); 8402 if (rc != MBX_BUSY) 8403 lpfc_printf_log(phba, 8404 KERN_ERR, 8405 LOG_MBOX | LOG_SLI, 8406 "0350 rc should have" 8407 "been MBX_BUSY\n"); 8408 if (rc != MBX_NOT_FINISHED) 8409 goto send_current_mbox; 8410 } 8411 } 8412 spin_lock_irqsave( 8413 &phba->pport->work_port_lock, 8414 iflag); 8415 phba->pport->work_port_events &= 8416 ~WORKER_MBOX_TMO; 8417 spin_unlock_irqrestore( 8418 &phba->pport->work_port_lock, 8419 iflag); 8420 lpfc_mbox_cmpl_put(phba, pmb); 8421 } 8422 } else 8423 spin_unlock_irqrestore(&phba->hbalock, iflag); 8424 8425 if ((work_ha_copy & HA_MBATT) && 8426 (phba->sli.mbox_active == NULL)) { 8427 send_current_mbox: 8428 /* Process next mailbox command if there is one */ 8429 do { 8430 rc = lpfc_sli_issue_mbox(phba, NULL, 8431 MBX_NOWAIT); 8432 } while (rc == MBX_NOT_FINISHED); 8433 if (rc != MBX_SUCCESS) 8434 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 8435 LOG_SLI, "0349 rc should be " 8436 "MBX_SUCCESS\n"); 8437 } 8438 8439 spin_lock_irqsave(&phba->hbalock, iflag); 8440 phba->work_ha |= work_ha_copy; 8441 spin_unlock_irqrestore(&phba->hbalock, iflag); 8442 lpfc_worker_wake_up(phba); 8443 } 8444 return IRQ_HANDLED; 8445 8446 } /* lpfc_sli_sp_intr_handler */ 8447 8448 /** 8449 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device. 8450 * @irq: Interrupt number. 8451 * @dev_id: The device context pointer. 8452 * 8453 * This function is directly called from the PCI layer as an interrupt 8454 * service routine when device with SLI-3 interface spec is enabled with 8455 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB 8456 * ring event in the HBA. However, when the device is enabled with either 8457 * MSI or Pin-IRQ interrupt mode, this function is called as part of the 8458 * device-level interrupt handler. When the PCI slot is in error recovery 8459 * or the HBA is undergoing initialization, the interrupt handler will not 8460 * process the interrupt. The SCSI FCP fast-path ring event are handled in 8461 * the intrrupt context. This function is called without any lock held. 8462 * It gets the hbalock to access and update SLI data structures. 8463 * 8464 * This function returns IRQ_HANDLED when interrupt is handled else it 8465 * returns IRQ_NONE. 8466 **/ 8467 irqreturn_t 8468 lpfc_sli_fp_intr_handler(int irq, void *dev_id) 8469 { 8470 struct lpfc_hba *phba; 8471 uint32_t ha_copy; 8472 unsigned long status; 8473 unsigned long iflag; 8474 8475 /* Get the driver's phba structure from the dev_id and 8476 * assume the HBA is not interrupting. 8477 */ 8478 phba = (struct lpfc_hba *) dev_id; 8479 8480 if (unlikely(!phba)) 8481 return IRQ_NONE; 8482 8483 /* 8484 * Stuff needs to be attented to when this function is invoked as an 8485 * individual interrupt handler in MSI-X multi-message interrupt mode 8486 */ 8487 if (phba->intr_type == MSIX) { 8488 /* Check device state for handling interrupt */ 8489 if (lpfc_intr_state_check(phba)) 8490 return IRQ_NONE; 8491 /* Need to read HA REG for FCP ring and other ring events */ 8492 ha_copy = readl(phba->HAregaddr); 8493 /* Clear up only attention source related to fast-path */ 8494 spin_lock_irqsave(&phba->hbalock, iflag); 8495 /* 8496 * If there is deferred error attention, do not check for 8497 * any interrupt. 
8498 */ 8499 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 8500 spin_unlock_irqrestore(&phba->hbalock, iflag); 8501 return IRQ_NONE; 8502 } 8503 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)), 8504 phba->HAregaddr); 8505 readl(phba->HAregaddr); /* flush */ 8506 spin_unlock_irqrestore(&phba->hbalock, iflag); 8507 } else 8508 ha_copy = phba->ha_copy; 8509 8510 /* 8511 * Process all events on FCP ring. Take the optimized path for FCP IO. 8512 */ 8513 ha_copy &= ~(phba->work_ha_mask); 8514 8515 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING))); 8516 status >>= (4*LPFC_FCP_RING); 8517 if (status & HA_RXMASK) 8518 lpfc_sli_handle_fast_ring_event(phba, 8519 &phba->sli.ring[LPFC_FCP_RING], 8520 status); 8521 8522 if (phba->cfg_multi_ring_support == 2) { 8523 /* 8524 * Process all events on extra ring. Take the optimized path 8525 * for extra ring IO. 8526 */ 8527 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING))); 8528 status >>= (4*LPFC_EXTRA_RING); 8529 if (status & HA_RXMASK) { 8530 lpfc_sli_handle_fast_ring_event(phba, 8531 &phba->sli.ring[LPFC_EXTRA_RING], 8532 status); 8533 } 8534 } 8535 return IRQ_HANDLED; 8536 } /* lpfc_sli_fp_intr_handler */ 8537 8538 /** 8539 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device 8540 * @irq: Interrupt number. 8541 * @dev_id: The device context pointer. 8542 * 8543 * This function is the HBA device-level interrupt handler to device with 8544 * SLI-3 interface spec, called from the PCI layer when either MSI or 8545 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which 8546 * requires driver attention. This function invokes the slow-path interrupt 8547 * attention handling function and fast-path interrupt attention handling 8548 * function in turn to process the relevant HBA attention events. This 8549 * function is called without any lock held. It gets the hbalock to access 8550 * and update SLI data structures. 8551 * 8552 * This function returns IRQ_HANDLED when interrupt is handled, else it 8553 * returns IRQ_NONE. 8554 **/ 8555 irqreturn_t 8556 lpfc_sli_intr_handler(int irq, void *dev_id) 8557 { 8558 struct lpfc_hba *phba; 8559 irqreturn_t sp_irq_rc, fp_irq_rc; 8560 unsigned long status1, status2; 8561 uint32_t hc_copy; 8562 8563 /* 8564 * Get the driver's phba structure from the dev_id and 8565 * assume the HBA is not interrupting. 8566 */ 8567 phba = (struct lpfc_hba *) dev_id; 8568 8569 if (unlikely(!phba)) 8570 return IRQ_NONE; 8571 8572 /* Check device state for handling interrupt */ 8573 if (lpfc_intr_state_check(phba)) 8574 return IRQ_NONE; 8575 8576 spin_lock(&phba->hbalock); 8577 phba->ha_copy = readl(phba->HAregaddr); 8578 if (unlikely(!phba->ha_copy)) { 8579 spin_unlock(&phba->hbalock); 8580 return IRQ_NONE; 8581 } else if (phba->ha_copy & HA_ERATT) { 8582 if (phba->hba_flag & HBA_ERATT_HANDLED) 8583 /* ERATT polling has handled ERATT */ 8584 phba->ha_copy &= ~HA_ERATT; 8585 else 8586 /* Indicate interrupt handler handles ERATT */ 8587 phba->hba_flag |= HBA_ERATT_HANDLED; 8588 } 8589 8590 /* 8591 * If there is deferred error attention, do not check for any interrupt. 
8592 */ 8593 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 8594 spin_unlock(&phba->hbalock); 8595 return IRQ_NONE; 8596 } 8597 8598 /* Clear attention sources except link and error attentions */ 8599 hc_copy = readl(phba->HCregaddr); 8600 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA 8601 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA), 8602 phba->HCregaddr); 8603 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr); 8604 writel(hc_copy, phba->HCregaddr); 8605 readl(phba->HAregaddr); /* flush */ 8606 spin_unlock(&phba->hbalock); 8607 8608 /* 8609 * Invokes slow-path host attention interrupt handling as appropriate. 8610 */ 8611 8612 /* status of events with mailbox and link attention */ 8613 status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT); 8614 8615 /* status of events with ELS ring */ 8616 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING))); 8617 status2 >>= (4*LPFC_ELS_RING); 8618 8619 if (status1 || (status2 & HA_RXMASK)) 8620 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id); 8621 else 8622 sp_irq_rc = IRQ_NONE; 8623 8624 /* 8625 * Invoke fast-path host attention interrupt handling as appropriate. 8626 */ 8627 8628 /* status of events with FCP ring */ 8629 status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING))); 8630 status1 >>= (4*LPFC_FCP_RING); 8631 8632 /* status of events with extra ring */ 8633 if (phba->cfg_multi_ring_support == 2) { 8634 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING))); 8635 status2 >>= (4*LPFC_EXTRA_RING); 8636 } else 8637 status2 = 0; 8638 8639 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK)) 8640 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id); 8641 else 8642 fp_irq_rc = IRQ_NONE; 8643 8644 /* Return device-level interrupt handling status */ 8645 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc; 8646 } /* lpfc_sli_intr_handler */ 8647 8648 /** 8649 * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event 8650 * @phba: pointer to lpfc hba data structure. 8651 * 8652 * This routine is invoked by the worker thread to process all the pending 8653 * SLI4 FCP abort XRI events. 8654 **/ 8655 void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba) 8656 { 8657 struct lpfc_cq_event *cq_event; 8658 8659 /* First, declare the fcp xri abort event has been handled */ 8660 spin_lock_irq(&phba->hbalock); 8661 phba->hba_flag &= ~FCP_XRI_ABORT_EVENT; 8662 spin_unlock_irq(&phba->hbalock); 8663 /* Now, handle all the fcp xri abort events */ 8664 while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) { 8665 /* Get the first event from the head of the event queue */ 8666 spin_lock_irq(&phba->hbalock); 8667 list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue, 8668 cq_event, struct lpfc_cq_event, list); 8669 spin_unlock_irq(&phba->hbalock); 8670 /* Notify aborted XRI for FCP work queue */ 8671 lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri); 8672 /* Free the event processed back to the free pool */ 8673 lpfc_sli4_cq_event_release(phba, cq_event); 8674 } 8675 } 8676 8677 /** 8678 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event 8679 * @phba: pointer to lpfc hba data structure. 8680 * 8681 * This routine is invoked by the worker thread to process all the pending 8682 * SLI4 els abort xri events. 
8683 **/ 8684 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba) 8685 { 8686 struct lpfc_cq_event *cq_event; 8687 8688 /* First, declare the els xri abort event has been handled */ 8689 spin_lock_irq(&phba->hbalock); 8690 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT; 8691 spin_unlock_irq(&phba->hbalock); 8692 /* Now, handle all the els xri abort events */ 8693 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) { 8694 /* Get the first event from the head of the event queue */ 8695 spin_lock_irq(&phba->hbalock); 8696 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue, 8697 cq_event, struct lpfc_cq_event, list); 8698 spin_unlock_irq(&phba->hbalock); 8699 /* Notify aborted XRI for ELS work queue */ 8700 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri); 8701 /* Free the event processed back to the free pool */ 8702 lpfc_sli4_cq_event_release(phba, cq_event); 8703 } 8704 } 8705 8706 /** 8707 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn 8708 * @phba: pointer to lpfc hba data structure 8709 * @pIocbIn: pointer to the rspiocbq 8710 * @pIocbOut: pointer to the cmdiocbq 8711 * @wcqe: pointer to the complete wcqe 8712 * 8713 * This routine transfers the fields of a command iocbq to a response iocbq 8714 * by copying all the IOCB fields from command iocbq and transferring the 8715 * completion status information from the complete wcqe. 8716 **/ 8717 static void 8718 lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba, 8719 struct lpfc_iocbq *pIocbIn, 8720 struct lpfc_iocbq *pIocbOut, 8721 struct lpfc_wcqe_complete *wcqe) 8722 { 8723 unsigned long iflags; 8724 size_t offset = offsetof(struct lpfc_iocbq, iocb); 8725 8726 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset, 8727 sizeof(struct lpfc_iocbq) - offset); 8728 /* Map WCQE parameters into irspiocb parameters */ 8729 pIocbIn->iocb.ulpStatus = bf_get(lpfc_wcqe_c_status, wcqe); 8730 if (pIocbOut->iocb_flag & LPFC_IO_FCP) 8731 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR) 8732 pIocbIn->iocb.un.fcpi.fcpi_parm = 8733 pIocbOut->iocb.un.fcpi.fcpi_parm - 8734 wcqe->total_data_placed; 8735 else 8736 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; 8737 else { 8738 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; 8739 pIocbIn->iocb.un.genreq64.bdl.bdeSize = wcqe->total_data_placed; 8740 } 8741 8742 /* Pick up HBA exchange busy condition */ 8743 if (bf_get(lpfc_wcqe_c_xb, wcqe)) { 8744 spin_lock_irqsave(&phba->hbalock, iflags); 8745 pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY; 8746 spin_unlock_irqrestore(&phba->hbalock, iflags); 8747 } 8748 } 8749 8750 /** 8751 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe 8752 * @phba: Pointer to HBA context object. 8753 * @wcqe: Pointer to work-queue completion queue entry. 8754 * 8755 * This routine handles an ELS work-queue completion event and construct 8756 * a pseudo response ELS IODBQ from the SLI4 ELS WCQE for the common 8757 * discovery engine to handle. 8758 * 8759 * Return: Pointer to the receive IOCBQ, NULL otherwise. 
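 *
 * Note: if no matching command IOCB is found, @irspiocbq is released back to
 * the pool here and the caller must not use it afterwards.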
8760 **/ 8761 static struct lpfc_iocbq * 8762 lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba, 8763 struct lpfc_iocbq *irspiocbq) 8764 { 8765 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 8766 struct lpfc_iocbq *cmdiocbq; 8767 struct lpfc_wcqe_complete *wcqe; 8768 unsigned long iflags; 8769 8770 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl; 8771 spin_lock_irqsave(&phba->hbalock, iflags); 8772 pring->stats.iocb_event++; 8773 /* Look up the ELS command IOCB and create pseudo response IOCB */ 8774 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, 8775 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 8776 spin_unlock_irqrestore(&phba->hbalock, iflags); 8777 8778 if (unlikely(!cmdiocbq)) { 8779 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 8780 "0386 ELS complete with no corresponding " 8781 "cmdiocb: iotag (%d)\n", 8782 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 8783 lpfc_sli_release_iocbq(phba, irspiocbq); 8784 return NULL; 8785 } 8786 8787 /* Fake the irspiocbq and copy necessary response information */ 8788 lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe); 8789 8790 return irspiocbq; 8791 } 8792 8793 /** 8794 * lpfc_sli4_sp_handle_async_event - Handle an asynchroous event 8795 * @phba: Pointer to HBA context object. 8796 * @cqe: Pointer to mailbox completion queue entry. 8797 * 8798 * This routine process a mailbox completion queue entry with asynchrous 8799 * event. 8800 * 8801 * Return: true if work posted to worker thread, otherwise false. 8802 **/ 8803 static bool 8804 lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe) 8805 { 8806 struct lpfc_cq_event *cq_event; 8807 unsigned long iflags; 8808 8809 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 8810 "0392 Async Event: word0:x%x, word1:x%x, " 8811 "word2:x%x, word3:x%x\n", mcqe->word0, 8812 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer); 8813 8814 /* Allocate a new internal CQ_EVENT entry */ 8815 cq_event = lpfc_sli4_cq_event_alloc(phba); 8816 if (!cq_event) { 8817 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8818 "0394 Failed to allocate CQ_EVENT entry\n"); 8819 return false; 8820 } 8821 8822 /* Move the CQE into an asynchronous event entry */ 8823 memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe)); 8824 spin_lock_irqsave(&phba->hbalock, iflags); 8825 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue); 8826 /* Set the async event flag */ 8827 phba->hba_flag |= ASYNC_EVENT; 8828 spin_unlock_irqrestore(&phba->hbalock, iflags); 8829 8830 return true; 8831 } 8832 8833 /** 8834 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event 8835 * @phba: Pointer to HBA context object. 8836 * @cqe: Pointer to mailbox completion queue entry. 8837 * 8838 * This routine process a mailbox completion queue entry with mailbox 8839 * completion event. 8840 * 8841 * Return: true if work posted to worker thread, otherwise false. 
8842 **/
8843 static bool
8844 lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
8845 {
8846 uint32_t mcqe_status;
8847 MAILBOX_t *mbox, *pmbox;
8848 struct lpfc_mqe *mqe;
8849 struct lpfc_vport *vport;
8850 struct lpfc_nodelist *ndlp;
8851 struct lpfc_dmabuf *mp;
8852 unsigned long iflags;
8853 LPFC_MBOXQ_t *pmb;
8854 bool workposted = false;
8855 int rc;
8856 
8857 /* If not a mailbox completion MCQE, bail out through the mailbox-consumed check */
8858 if (!bf_get(lpfc_trailer_completed, mcqe))
8859 goto out_no_mqe_complete;
8860 
8861 /* Get the reference to the active mbox command */
8862 spin_lock_irqsave(&phba->hbalock, iflags);
8863 pmb = phba->sli.mbox_active;
8864 if (unlikely(!pmb)) {
8865 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
8866 "1832 No pending MBOX command to handle\n");
8867 spin_unlock_irqrestore(&phba->hbalock, iflags);
8868 goto out_no_mqe_complete;
8869 }
8870 spin_unlock_irqrestore(&phba->hbalock, iflags);
8871 mqe = &pmb->u.mqe;
8872 pmbox = (MAILBOX_t *)&pmb->u.mqe;
8873 mbox = phba->mbox;
8874 vport = pmb->vport;
8875 
8876 /* Reset heartbeat timer */
8877 phba->last_completion_time = jiffies;
8878 del_timer(&phba->sli.mbox_tmo);
8879 
8880 /* Move mbox data to caller's mailbox region, do endian swapping */
8881 if (pmb->mbox_cmpl && mbox)
8882 lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
8883 /* Set the mailbox status with SLI4 range 0x4000 */
8884 mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
8885 if (mcqe_status != MB_CQE_STATUS_SUCCESS)
8886 bf_set(lpfc_mqe_status, mqe,
8887 (LPFC_MBX_ERROR_RANGE | mcqe_status));
8888 
8889 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
8890 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
8891 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
8892 "MBOX dflt rpi: status:x%x rpi:x%x",
8893 mcqe_status,
8894 pmbox->un.varWords[0], 0);
8895 if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
8896 mp = (struct lpfc_dmabuf *)(pmb->context1);
8897 ndlp = (struct lpfc_nodelist *)pmb->context2;
8898 /* Reg_LOGIN of dflt RPI was successful. Now let's get
8899 * rid of the RPI using the same mbox buffer.
8900 */ 8901 lpfc_unreg_login(phba, vport->vpi, 8902 pmbox->un.varWords[0], pmb); 8903 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; 8904 pmb->context1 = mp; 8905 pmb->context2 = ndlp; 8906 pmb->vport = vport; 8907 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 8908 if (rc != MBX_BUSY) 8909 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 8910 LOG_SLI, "0385 rc should " 8911 "have been MBX_BUSY\n"); 8912 if (rc != MBX_NOT_FINISHED) 8913 goto send_current_mbox; 8914 } 8915 } 8916 spin_lock_irqsave(&phba->pport->work_port_lock, iflags); 8917 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 8918 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags); 8919 8920 /* There is mailbox completion work to do */ 8921 spin_lock_irqsave(&phba->hbalock, iflags); 8922 __lpfc_mbox_cmpl_put(phba, pmb); 8923 phba->work_ha |= HA_MBATT; 8924 spin_unlock_irqrestore(&phba->hbalock, iflags); 8925 workposted = true; 8926 8927 send_current_mbox: 8928 spin_lock_irqsave(&phba->hbalock, iflags); 8929 /* Release the mailbox command posting token */ 8930 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8931 /* Setting active mailbox pointer need to be in sync to flag clear */ 8932 phba->sli.mbox_active = NULL; 8933 spin_unlock_irqrestore(&phba->hbalock, iflags); 8934 /* Wake up worker thread to post the next pending mailbox command */ 8935 lpfc_worker_wake_up(phba); 8936 out_no_mqe_complete: 8937 if (bf_get(lpfc_trailer_consumed, mcqe)) 8938 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq); 8939 return workposted; 8940 } 8941 8942 /** 8943 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry 8944 * @phba: Pointer to HBA context object. 8945 * @cqe: Pointer to mailbox completion queue entry. 8946 * 8947 * This routine process a mailbox completion queue entry, it invokes the 8948 * proper mailbox complete handling or asynchrous event handling routine 8949 * according to the MCQE's async bit. 8950 * 8951 * Return: true if work posted to worker thread, otherwise false. 8952 **/ 8953 static bool 8954 lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe) 8955 { 8956 struct lpfc_mcqe mcqe; 8957 bool workposted; 8958 8959 /* Copy the mailbox MCQE and convert endian order as needed */ 8960 lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe)); 8961 8962 /* Invoke the proper event handling routine */ 8963 if (!bf_get(lpfc_trailer_async, &mcqe)) 8964 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe); 8965 else 8966 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe); 8967 return workposted; 8968 } 8969 8970 /** 8971 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event 8972 * @phba: Pointer to HBA context object. 8973 * @wcqe: Pointer to work-queue completion queue entry. 8974 * 8975 * This routine handles an ELS work-queue completion event. 8976 * 8977 * Return: true if work posted to worker thread, otherwise false. 
8978 **/ 8979 static bool 8980 lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, 8981 struct lpfc_wcqe_complete *wcqe) 8982 { 8983 struct lpfc_iocbq *irspiocbq; 8984 unsigned long iflags; 8985 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING]; 8986 8987 /* Get an irspiocbq for later ELS response processing use */ 8988 irspiocbq = lpfc_sli_get_iocbq(phba); 8989 if (!irspiocbq) { 8990 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8991 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d " 8992 "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n", 8993 pring->txq_cnt, phba->iocb_cnt, 8994 phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt, 8995 phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt); 8996 return false; 8997 } 8998 8999 /* Save off the slow-path queue event for work thread to process */ 9000 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe)); 9001 spin_lock_irqsave(&phba->hbalock, iflags); 9002 list_add_tail(&irspiocbq->cq_event.list, 9003 &phba->sli4_hba.sp_queue_event); 9004 phba->hba_flag |= HBA_SP_QUEUE_EVT; 9005 spin_unlock_irqrestore(&phba->hbalock, iflags); 9006 9007 return true; 9008 } 9009 9010 /** 9011 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event 9012 * @phba: Pointer to HBA context object. 9013 * @wcqe: Pointer to work-queue completion queue entry. 9014 * 9015 * This routine handles slow-path WQ entry comsumed event by invoking the 9016 * proper WQ release routine to the slow-path WQ. 9017 **/ 9018 static void 9019 lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba, 9020 struct lpfc_wcqe_release *wcqe) 9021 { 9022 /* Check for the slow-path ELS work queue */ 9023 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id) 9024 lpfc_sli4_wq_release(phba->sli4_hba.els_wq, 9025 bf_get(lpfc_wcqe_r_wqe_index, wcqe)); 9026 else 9027 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 9028 "2579 Slow-path wqe consume event carries " 9029 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n", 9030 bf_get(lpfc_wcqe_r_wqe_index, wcqe), 9031 phba->sli4_hba.els_wq->queue_id); 9032 } 9033 9034 /** 9035 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event 9036 * @phba: Pointer to HBA context object. 9037 * @cq: Pointer to a WQ completion queue. 9038 * @wcqe: Pointer to work-queue completion queue entry. 9039 * 9040 * This routine handles an XRI abort event. 9041 * 9042 * Return: true if work posted to worker thread, otherwise false. 
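 *
 * The XRI-abort CQE is copied into a driver CQ_EVENT and queued to either the
 * FCP or the ELS xri-abort work list, depending on the subtype of @cq, for the
 * worker thread to finish processing.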
9043 **/ 9044 static bool 9045 lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba, 9046 struct lpfc_queue *cq, 9047 struct sli4_wcqe_xri_aborted *wcqe) 9048 { 9049 bool workposted = false; 9050 struct lpfc_cq_event *cq_event; 9051 unsigned long iflags; 9052 9053 /* Allocate a new internal CQ_EVENT entry */ 9054 cq_event = lpfc_sli4_cq_event_alloc(phba); 9055 if (!cq_event) { 9056 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9057 "0602 Failed to allocate CQ_EVENT entry\n"); 9058 return false; 9059 } 9060 9061 /* Move the CQE into the proper xri abort event list */ 9062 memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted)); 9063 switch (cq->subtype) { 9064 case LPFC_FCP: 9065 spin_lock_irqsave(&phba->hbalock, iflags); 9066 list_add_tail(&cq_event->list, 9067 &phba->sli4_hba.sp_fcp_xri_aborted_work_queue); 9068 /* Set the fcp xri abort event flag */ 9069 phba->hba_flag |= FCP_XRI_ABORT_EVENT; 9070 spin_unlock_irqrestore(&phba->hbalock, iflags); 9071 workposted = true; 9072 break; 9073 case LPFC_ELS: 9074 spin_lock_irqsave(&phba->hbalock, iflags); 9075 list_add_tail(&cq_event->list, 9076 &phba->sli4_hba.sp_els_xri_aborted_work_queue); 9077 /* Set the els xri abort event flag */ 9078 phba->hba_flag |= ELS_XRI_ABORT_EVENT; 9079 spin_unlock_irqrestore(&phba->hbalock, iflags); 9080 workposted = true; 9081 break; 9082 default: 9083 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9084 "0603 Invalid work queue CQE subtype (x%x)\n", 9085 cq->subtype); 9086 workposted = false; 9087 break; 9088 } 9089 return workposted; 9090 } 9091 9092 /** 9093 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry 9094 * @phba: Pointer to HBA context object. 9095 * @rcqe: Pointer to receive-queue completion queue entry. 9096 * 9097 * This routine process a receive-queue completion queue entry. 9098 * 9099 * Return: true if work posted to worker thread, otherwise false. 
9100 **/
9101 static bool
9102 lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
9103 {
9104 bool workposted = false;
9105 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
9106 struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
9107 struct hbq_dmabuf *dma_buf;
9108 uint32_t status;
9109 unsigned long iflags;
9110 
9111 if (bf_get(lpfc_rcqe_rq_id, rcqe) != hrq->queue_id)
9112 goto out;
9113 
9114 status = bf_get(lpfc_rcqe_status, rcqe);
9115 switch (status) {
9116 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
9117 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9118 "2537 Receive Frame Truncated!!\n");
/* Note: falls through (no break); the truncated frame is still queued below */
9119 case FC_STATUS_RQ_SUCCESS:
9120 lpfc_sli4_rq_release(hrq, drq);
9121 spin_lock_irqsave(&phba->hbalock, iflags);
9122 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
9123 if (!dma_buf) {
9124 spin_unlock_irqrestore(&phba->hbalock, iflags);
9125 goto out;
9126 }
9127 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
9128 /* save off the frame for the worker thread to process */
9129 list_add_tail(&dma_buf->cq_event.list,
9130 &phba->sli4_hba.sp_queue_event);
9131 /* Frame received */
9132 phba->hba_flag |= HBA_SP_QUEUE_EVT;
9133 spin_unlock_irqrestore(&phba->hbalock, iflags);
9134 workposted = true;
9135 break;
9136 case FC_STATUS_INSUFF_BUF_NEED_BUF:
9137 case FC_STATUS_INSUFF_BUF_FRM_DISC:
9138 /* Post more buffers if possible */
9139 spin_lock_irqsave(&phba->hbalock, iflags);
9140 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
9141 spin_unlock_irqrestore(&phba->hbalock, iflags);
9142 workposted = true;
9143 break;
9144 }
9145 out:
9146 return workposted;
9147 }
9148 
9149 /**
9150 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
9151 * @phba: Pointer to HBA context object.
9152 * @cq: Pointer to the completion queue.
9153 * @cqe: Pointer to a completion queue entry.
9154 *
9155 * This routine processes a slow-path work-queue or receive-queue completion
9156 * queue entry.
9157 *
9158 * Return: true if work posted to worker thread, otherwise false.
9159 **/ 9160 static bool 9161 lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 9162 struct lpfc_cqe *cqe) 9163 { 9164 struct lpfc_cqe cqevt; 9165 bool workposted = false; 9166 9167 /* Copy the work queue CQE and convert endian order if needed */ 9168 lpfc_sli_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe)); 9169 9170 /* Check and process for different type of WCQE and dispatch */ 9171 switch (bf_get(lpfc_cqe_code, &cqevt)) { 9172 case CQE_CODE_COMPL_WQE: 9173 /* Process the WQ/RQ complete event */ 9174 phba->last_completion_time = jiffies; 9175 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, 9176 (struct lpfc_wcqe_complete *)&cqevt); 9177 break; 9178 case CQE_CODE_RELEASE_WQE: 9179 /* Process the WQ release event */ 9180 lpfc_sli4_sp_handle_rel_wcqe(phba, 9181 (struct lpfc_wcqe_release *)&cqevt); 9182 break; 9183 case CQE_CODE_XRI_ABORTED: 9184 /* Process the WQ XRI abort event */ 9185 phba->last_completion_time = jiffies; 9186 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq, 9187 (struct sli4_wcqe_xri_aborted *)&cqevt); 9188 break; 9189 case CQE_CODE_RECEIVE: 9190 /* Process the RQ event */ 9191 phba->last_completion_time = jiffies; 9192 workposted = lpfc_sli4_sp_handle_rcqe(phba, 9193 (struct lpfc_rcqe *)&cqevt); 9194 break; 9195 default: 9196 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9197 "0388 Not a valid WCQE code: x%x\n", 9198 bf_get(lpfc_cqe_code, &cqevt)); 9199 break; 9200 } 9201 return workposted; 9202 } 9203 9204 /** 9205 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry 9206 * @phba: Pointer to HBA context object. 9207 * @eqe: Pointer to fast-path event queue entry. 9208 * 9209 * This routine process a event queue entry from the slow-path event queue. 9210 * It will check the MajorCode and MinorCode to determine this is for a 9211 * completion event on a completion queue, if not, an error shall be logged 9212 * and just return. Otherwise, it will get to the corresponding completion 9213 * queue and process all the entries on that completion queue, rearm the 9214 * completion queue, and then return. 
9215 * 9216 **/ 9217 static void 9218 lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe) 9219 { 9220 struct lpfc_queue *cq = NULL, *childq, *speq; 9221 struct lpfc_cqe *cqe; 9222 bool workposted = false; 9223 int ecount = 0; 9224 uint16_t cqid; 9225 9226 if (bf_get_le32(lpfc_eqe_major_code, eqe) != 0) { 9227 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9228 "0359 Not a valid slow-path completion " 9229 "event: majorcode=x%x, minorcode=x%x\n", 9230 bf_get_le32(lpfc_eqe_major_code, eqe), 9231 bf_get_le32(lpfc_eqe_minor_code, eqe)); 9232 return; 9233 } 9234 9235 /* Get the reference to the corresponding CQ */ 9236 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); 9237 9238 /* Search for completion queue pointer matching this cqid */ 9239 speq = phba->sli4_hba.sp_eq; 9240 list_for_each_entry(childq, &speq->child_list, list) { 9241 if (childq->queue_id == cqid) { 9242 cq = childq; 9243 break; 9244 } 9245 } 9246 if (unlikely(!cq)) { 9247 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) 9248 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9249 "0365 Slow-path CQ identifier " 9250 "(%d) does not exist\n", cqid); 9251 return; 9252 } 9253 9254 /* Process all the entries to the CQ */ 9255 switch (cq->type) { 9256 case LPFC_MCQ: 9257 while ((cqe = lpfc_sli4_cq_get(cq))) { 9258 workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe); 9259 if (!(++ecount % LPFC_GET_QE_REL_INT)) 9260 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); 9261 } 9262 break; 9263 case LPFC_WCQ: 9264 while ((cqe = lpfc_sli4_cq_get(cq))) { 9265 workposted |= lpfc_sli4_sp_handle_cqe(phba, cq, cqe); 9266 if (!(++ecount % LPFC_GET_QE_REL_INT)) 9267 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); 9268 } 9269 break; 9270 default: 9271 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9272 "0370 Invalid completion queue type (%d)\n", 9273 cq->type); 9274 return; 9275 } 9276 9277 /* Catch the no cq entry condition, log an error */ 9278 if (unlikely(ecount == 0)) 9279 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9280 "0371 No entry from the CQ: identifier " 9281 "(x%x), type (%d)\n", cq->queue_id, cq->type); 9282 9283 /* In any case, flash and re-arm the RCQ */ 9284 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM); 9285 9286 /* wake up worker thread if there are works to be done */ 9287 if (workposted) 9288 lpfc_worker_wake_up(phba); 9289 } 9290 9291 /** 9292 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry 9293 * @eqe: Pointer to fast-path completion queue entry. 9294 * 9295 * This routine process a fast-path work queue completion entry from fast-path 9296 * event queue for FCP command response completion. 9297 **/ 9298 static void 9299 lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, 9300 struct lpfc_wcqe_complete *wcqe) 9301 { 9302 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING]; 9303 struct lpfc_iocbq *cmdiocbq; 9304 struct lpfc_iocbq irspiocbq; 9305 unsigned long iflags; 9306 9307 spin_lock_irqsave(&phba->hbalock, iflags); 9308 pring->stats.iocb_event++; 9309 spin_unlock_irqrestore(&phba->hbalock, iflags); 9310 9311 /* Check for response status */ 9312 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) { 9313 /* If resource errors reported from HBA, reduce queue 9314 * depth of the SCSI device. 
9315 */ 9316 if ((bf_get(lpfc_wcqe_c_status, wcqe) == 9317 IOSTAT_LOCAL_REJECT) && 9318 (wcqe->parameter == IOERR_NO_RESOURCES)) { 9319 phba->lpfc_rampdown_queue_depth(phba); 9320 } 9321 /* Log the error status */ 9322 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 9323 "0373 FCP complete error: status=x%x, " 9324 "hw_status=x%x, total_data_specified=%d, " 9325 "parameter=x%x, word3=x%x\n", 9326 bf_get(lpfc_wcqe_c_status, wcqe), 9327 bf_get(lpfc_wcqe_c_hw_status, wcqe), 9328 wcqe->total_data_placed, wcqe->parameter, 9329 wcqe->word3); 9330 } 9331 9332 /* Look up the FCP command IOCB and create pseudo response IOCB */ 9333 spin_lock_irqsave(&phba->hbalock, iflags); 9334 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, 9335 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 9336 spin_unlock_irqrestore(&phba->hbalock, iflags); 9337 if (unlikely(!cmdiocbq)) { 9338 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 9339 "0374 FCP complete with no corresponding " 9340 "cmdiocb: iotag (%d)\n", 9341 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 9342 return; 9343 } 9344 if (unlikely(!cmdiocbq->iocb_cmpl)) { 9345 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 9346 "0375 FCP cmdiocb not callback function " 9347 "iotag: (%d)\n", 9348 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 9349 return; 9350 } 9351 9352 /* Fake the irspiocb and copy necessary response information */ 9353 lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe); 9354 9355 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) { 9356 spin_lock_irqsave(&phba->hbalock, iflags); 9357 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED; 9358 spin_unlock_irqrestore(&phba->hbalock, iflags); 9359 } 9360 9361 /* Pass the cmd_iocb and the rsp state to the upper layer */ 9362 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq); 9363 } 9364 9365 /** 9366 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event 9367 * @phba: Pointer to HBA context object. 9368 * @cq: Pointer to completion queue. 9369 * @wcqe: Pointer to work-queue completion queue entry. 9370 * 9371 * This routine handles an fast-path WQ entry comsumed event by invoking the 9372 * proper WQ release routine to the slow-path WQ. 9373 **/ 9374 static void 9375 lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 9376 struct lpfc_wcqe_release *wcqe) 9377 { 9378 struct lpfc_queue *childwq; 9379 bool wqid_matched = false; 9380 uint16_t fcp_wqid; 9381 9382 /* Check for fast-path FCP work queue release */ 9383 fcp_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe); 9384 list_for_each_entry(childwq, &cq->child_list, list) { 9385 if (childwq->queue_id == fcp_wqid) { 9386 lpfc_sli4_wq_release(childwq, 9387 bf_get(lpfc_wcqe_r_wqe_index, wcqe)); 9388 wqid_matched = true; 9389 break; 9390 } 9391 } 9392 /* Report warning log message if no match found */ 9393 if (wqid_matched != true) 9394 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 9395 "2580 Fast-path wqe consume event carries " 9396 "miss-matched qid: wcqe-qid=x%x\n", fcp_wqid); 9397 } 9398 9399 /** 9400 * lpfc_sli4_fp_handle_wcqe - Process fast-path work queue completion entry 9401 * @cq: Pointer to the completion queue. 9402 * @eqe: Pointer to fast-path completion queue entry. 9403 * 9404 * This routine process a fast-path work queue completion entry from fast-path 9405 * event queue for FCP command response completion. 
9406 **/
9407 static int
9408 lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
9409 struct lpfc_cqe *cqe)
9410 {
9411 struct lpfc_wcqe_release wcqe;
9412 bool workposted = false;
9413 
9414 /* Copy the work queue CQE and convert endian order if needed */
9415 lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
9416 
9417 /* Check and process for different type of WCQE and dispatch */
9418 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
9419 case CQE_CODE_COMPL_WQE:
9420 /* Process the WQ complete event */
9421 phba->last_completion_time = jiffies;
9422 lpfc_sli4_fp_handle_fcp_wcqe(phba,
9423 (struct lpfc_wcqe_complete *)&wcqe);
9424 break;
9425 case CQE_CODE_RELEASE_WQE:
9426 /* Process the WQ release event */
9427 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
9428 (struct lpfc_wcqe_release *)&wcqe);
9429 break;
9430 case CQE_CODE_XRI_ABORTED:
9431 /* Process the WQ XRI abort event */
9432 phba->last_completion_time = jiffies;
9433 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
9434 (struct sli4_wcqe_xri_aborted *)&wcqe);
9435 break;
9436 default:
9437 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9438 "0144 Not a valid WCQE code: x%x\n",
9439 bf_get(lpfc_wcqe_c_code, &wcqe));
9440 break;
9441 }
9442 return workposted;
9443 }
9444 
9445 /**
9446 * lpfc_sli4_fp_handle_eqe - Process a fast-path event queue entry
9447 * @phba: Pointer to HBA context object.
9448 * @eqe: Pointer to fast-path event queue entry.
* @fcp_cqidx: Index of the fast-path FCP completion queue this EQE belongs to.
9449 *
9450 * This routine processes an event queue entry from the fast-path event queue.
9451 * It checks the MajorCode and MinorCode to determine whether this is a
9452 * completion event on a completion queue; if not, an error is logged and the
9453 * routine simply returns. Otherwise, it gets to the corresponding completion
9454 * queue, processes all the entries on that completion queue, rearms the
9455 * completion queue, and then returns.
9456 **/ 9457 static void 9458 lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, 9459 uint32_t fcp_cqidx) 9460 { 9461 struct lpfc_queue *cq; 9462 struct lpfc_cqe *cqe; 9463 bool workposted = false; 9464 uint16_t cqid; 9465 int ecount = 0; 9466 9467 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) { 9468 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9469 "0366 Not a valid fast-path completion " 9470 "event: majorcode=x%x, minorcode=x%x\n", 9471 bf_get_le32(lpfc_eqe_major_code, eqe), 9472 bf_get_le32(lpfc_eqe_minor_code, eqe)); 9473 return; 9474 } 9475 9476 cq = phba->sli4_hba.fcp_cq[fcp_cqidx]; 9477 if (unlikely(!cq)) { 9478 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) 9479 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9480 "0367 Fast-path completion queue " 9481 "does not exist\n"); 9482 return; 9483 } 9484 9485 /* Get the reference to the corresponding CQ */ 9486 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); 9487 if (unlikely(cqid != cq->queue_id)) { 9488 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9489 "0368 Miss-matched fast-path completion " 9490 "queue identifier: eqcqid=%d, fcpcqid=%d\n", 9491 cqid, cq->queue_id); 9492 return; 9493 } 9494 9495 /* Process all the entries to the CQ */ 9496 while ((cqe = lpfc_sli4_cq_get(cq))) { 9497 workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe); 9498 if (!(++ecount % LPFC_GET_QE_REL_INT)) 9499 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); 9500 } 9501 9502 /* Catch the no cq entry condition */ 9503 if (unlikely(ecount == 0)) 9504 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9505 "0369 No entry from fast-path completion " 9506 "queue fcpcqid=%d\n", cq->queue_id); 9507 9508 /* In any case, flash and re-arm the CQ */ 9509 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM); 9510 9511 /* wake up worker thread if there are works to be done */ 9512 if (workposted) 9513 lpfc_worker_wake_up(phba); 9514 } 9515 9516 static void 9517 lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq) 9518 { 9519 struct lpfc_eqe *eqe; 9520 9521 /* walk all the EQ entries and drop on the floor */ 9522 while ((eqe = lpfc_sli4_eq_get(eq))) 9523 ; 9524 9525 /* Clear and re-arm the EQ */ 9526 lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM); 9527 } 9528 9529 /** 9530 * lpfc_sli4_sp_intr_handler - Slow-path interrupt handler to SLI-4 device 9531 * @irq: Interrupt number. 9532 * @dev_id: The device context pointer. 9533 * 9534 * This function is directly called from the PCI layer as an interrupt 9535 * service routine when device with SLI-4 interface spec is enabled with 9536 * MSI-X multi-message interrupt mode and there are slow-path events in 9537 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ 9538 * interrupt mode, this function is called as part of the device-level 9539 * interrupt handler. When the PCI slot is in error recovery or the HBA is 9540 * undergoing initialization, the interrupt handler will not process the 9541 * interrupt. The link attention and ELS ring attention events are handled 9542 * by the worker thread. The interrupt handler signals the worker thread 9543 * and returns for these events. This function is called without any lock 9544 * held. It gets the hbalock to access and update SLI data structures. 9545 * 9546 * This function returns IRQ_HANDLED when interrupt is handled else it 9547 * returns IRQ_NONE. 
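 *
 * Note: event queue entries are consumed in batches; the EQ is released
 * without re-arming every LPFC_GET_QE_REL_INT entries and re-armed once after
 * the processing loop completes.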
9548 **/ 9549 irqreturn_t 9550 lpfc_sli4_sp_intr_handler(int irq, void *dev_id) 9551 { 9552 struct lpfc_hba *phba; 9553 struct lpfc_queue *speq; 9554 struct lpfc_eqe *eqe; 9555 unsigned long iflag; 9556 int ecount = 0; 9557 9558 /* 9559 * Get the driver's phba structure from the dev_id 9560 */ 9561 phba = (struct lpfc_hba *)dev_id; 9562 9563 if (unlikely(!phba)) 9564 return IRQ_NONE; 9565 9566 /* Get to the EQ struct associated with this vector */ 9567 speq = phba->sli4_hba.sp_eq; 9568 9569 /* Check device state for handling interrupt */ 9570 if (unlikely(lpfc_intr_state_check(phba))) { 9571 /* Check again for link_state with lock held */ 9572 spin_lock_irqsave(&phba->hbalock, iflag); 9573 if (phba->link_state < LPFC_LINK_DOWN) 9574 /* Flush, clear interrupt, and rearm the EQ */ 9575 lpfc_sli4_eq_flush(phba, speq); 9576 spin_unlock_irqrestore(&phba->hbalock, iflag); 9577 return IRQ_NONE; 9578 } 9579 9580 /* 9581 * Process all the event on FCP slow-path EQ 9582 */ 9583 while ((eqe = lpfc_sli4_eq_get(speq))) { 9584 lpfc_sli4_sp_handle_eqe(phba, eqe); 9585 if (!(++ecount % LPFC_GET_QE_REL_INT)) 9586 lpfc_sli4_eq_release(speq, LPFC_QUEUE_NOARM); 9587 } 9588 9589 /* Always clear and re-arm the slow-path EQ */ 9590 lpfc_sli4_eq_release(speq, LPFC_QUEUE_REARM); 9591 9592 /* Catch the no cq entry condition */ 9593 if (unlikely(ecount == 0)) { 9594 if (phba->intr_type == MSIX) 9595 /* MSI-X treated interrupt served as no EQ share INT */ 9596 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 9597 "0357 MSI-X interrupt with no EQE\n"); 9598 else 9599 /* Non MSI-X treated on interrupt as EQ share INT */ 9600 return IRQ_NONE; 9601 } 9602 9603 return IRQ_HANDLED; 9604 } /* lpfc_sli4_sp_intr_handler */ 9605 9606 /** 9607 * lpfc_sli4_fp_intr_handler - Fast-path interrupt handler to SLI-4 device 9608 * @irq: Interrupt number. 9609 * @dev_id: The device context pointer. 9610 * 9611 * This function is directly called from the PCI layer as an interrupt 9612 * service routine when device with SLI-4 interface spec is enabled with 9613 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB 9614 * ring event in the HBA. However, when the device is enabled with either 9615 * MSI or Pin-IRQ interrupt mode, this function is called as part of the 9616 * device-level interrupt handler. When the PCI slot is in error recovery 9617 * or the HBA is undergoing initialization, the interrupt handler will not 9618 * process the interrupt. The SCSI FCP fast-path ring event are handled in 9619 * the intrrupt context. This function is called without any lock held. 9620 * It gets the hbalock to access and update SLI data structures. Note that, 9621 * the FCP EQ to FCP CQ are one-to-one map such that the FCP EQ index is 9622 * equal to that of FCP CQ index. 9623 * 9624 * This function returns IRQ_HANDLED when interrupt is handled else it 9625 * returns IRQ_NONE. 
9626 **/ 9627 irqreturn_t 9628 lpfc_sli4_fp_intr_handler(int irq, void *dev_id) 9629 { 9630 struct lpfc_hba *phba; 9631 struct lpfc_fcp_eq_hdl *fcp_eq_hdl; 9632 struct lpfc_queue *fpeq; 9633 struct lpfc_eqe *eqe; 9634 unsigned long iflag; 9635 int ecount = 0; 9636 uint32_t fcp_eqidx; 9637 9638 /* Get the driver's phba structure from the dev_id */ 9639 fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id; 9640 phba = fcp_eq_hdl->phba; 9641 fcp_eqidx = fcp_eq_hdl->idx; 9642 9643 if (unlikely(!phba)) 9644 return IRQ_NONE; 9645 9646 /* Get to the EQ struct associated with this vector */ 9647 fpeq = phba->sli4_hba.fp_eq[fcp_eqidx]; 9648 9649 /* Check device state for handling interrupt */ 9650 if (unlikely(lpfc_intr_state_check(phba))) { 9651 /* Check again for link_state with lock held */ 9652 spin_lock_irqsave(&phba->hbalock, iflag); 9653 if (phba->link_state < LPFC_LINK_DOWN) 9654 /* Flush, clear interrupt, and rearm the EQ */ 9655 lpfc_sli4_eq_flush(phba, fpeq); 9656 spin_unlock_irqrestore(&phba->hbalock, iflag); 9657 return IRQ_NONE; 9658 } 9659 9660 /* 9661 * Process all the event on FCP fast-path EQ 9662 */ 9663 while ((eqe = lpfc_sli4_eq_get(fpeq))) { 9664 lpfc_sli4_fp_handle_eqe(phba, eqe, fcp_eqidx); 9665 if (!(++ecount % LPFC_GET_QE_REL_INT)) 9666 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM); 9667 } 9668 9669 /* Always clear and re-arm the fast-path EQ */ 9670 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM); 9671 9672 if (unlikely(ecount == 0)) { 9673 if (phba->intr_type == MSIX) 9674 /* MSI-X treated interrupt served as no EQ share INT */ 9675 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 9676 "0358 MSI-X interrupt with no EQE\n"); 9677 else 9678 /* Non MSI-X treated on interrupt as EQ share INT */ 9679 return IRQ_NONE; 9680 } 9681 9682 return IRQ_HANDLED; 9683 } /* lpfc_sli4_fp_intr_handler */ 9684 9685 /** 9686 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device 9687 * @irq: Interrupt number. 9688 * @dev_id: The device context pointer. 9689 * 9690 * This function is the device-level interrupt handler to device with SLI-4 9691 * interface spec, called from the PCI layer when either MSI or Pin-IRQ 9692 * interrupt mode is enabled and there is an event in the HBA which requires 9693 * driver attention. This function invokes the slow-path interrupt attention 9694 * handling function and fast-path interrupt attention handling function in 9695 * turn to process the relevant HBA attention events. This function is called 9696 * without any lock held. It gets the hbalock to access and update SLI data 9697 * structures. 9698 * 9699 * This function returns IRQ_HANDLED when interrupt is handled, else it 9700 * returns IRQ_NONE. 9701 **/ 9702 irqreturn_t 9703 lpfc_sli4_intr_handler(int irq, void *dev_id) 9704 { 9705 struct lpfc_hba *phba; 9706 irqreturn_t sp_irq_rc, fp_irq_rc; 9707 bool fp_handled = false; 9708 uint32_t fcp_eqidx; 9709 9710 /* Get the driver's phba structure from the dev_id */ 9711 phba = (struct lpfc_hba *)dev_id; 9712 9713 if (unlikely(!phba)) 9714 return IRQ_NONE; 9715 9716 /* 9717 * Invokes slow-path host attention interrupt handling as appropriate. 9718 */ 9719 sp_irq_rc = lpfc_sli4_sp_intr_handler(irq, dev_id); 9720 9721 /* 9722 * Invoke fast-path host attention interrupt handling as appropriate. 
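 * Each fast-path FCP event queue gets its own invocation below, passing the
 * corresponding per-EQ fcp_eq_hdl entry as the dev_id, just as in the MSI-X
 * vector case.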
9723 */ 9724 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { 9725 fp_irq_rc = lpfc_sli4_fp_intr_handler(irq, 9726 &phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]); 9727 if (fp_irq_rc == IRQ_HANDLED) 9728 fp_handled |= true; 9729 } 9730 9731 return (fp_handled == true) ? IRQ_HANDLED : sp_irq_rc; 9732 } /* lpfc_sli4_intr_handler */ 9733 9734 /** 9735 * lpfc_sli4_queue_free - free a queue structure and associated memory 9736 * @queue: The queue structure to free. 9737 * 9738 * This function frees a queue structure and the DMAable memeory used for 9739 * the host resident queue. This function must be called after destroying the 9740 * queue on the HBA. 9741 **/ 9742 void 9743 lpfc_sli4_queue_free(struct lpfc_queue *queue) 9744 { 9745 struct lpfc_dmabuf *dmabuf; 9746 9747 if (!queue) 9748 return; 9749 9750 while (!list_empty(&queue->page_list)) { 9751 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf, 9752 list); 9753 dma_free_coherent(&queue->phba->pcidev->dev, SLI4_PAGE_SIZE, 9754 dmabuf->virt, dmabuf->phys); 9755 kfree(dmabuf); 9756 } 9757 kfree(queue); 9758 return; 9759 } 9760 9761 /** 9762 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure 9763 * @phba: The HBA that this queue is being created on. 9764 * @entry_size: The size of each queue entry for this queue. 9765 * @entry count: The number of entries that this queue will handle. 9766 * 9767 * This function allocates a queue structure and the DMAable memory used for 9768 * the host resident queue. This function must be called before creating the 9769 * queue on the HBA. 9770 **/ 9771 struct lpfc_queue * 9772 lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size, 9773 uint32_t entry_count) 9774 { 9775 struct lpfc_queue *queue; 9776 struct lpfc_dmabuf *dmabuf; 9777 int x, total_qe_count; 9778 void *dma_pointer; 9779 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 9780 9781 if (!phba->sli4_hba.pc_sli4_params.supported) 9782 hw_page_size = SLI4_PAGE_SIZE; 9783 9784 queue = kzalloc(sizeof(struct lpfc_queue) + 9785 (sizeof(union sli4_qe) * entry_count), GFP_KERNEL); 9786 if (!queue) 9787 return NULL; 9788 queue->page_count = (ALIGN(entry_size * entry_count, 9789 hw_page_size))/hw_page_size; 9790 INIT_LIST_HEAD(&queue->list); 9791 INIT_LIST_HEAD(&queue->page_list); 9792 INIT_LIST_HEAD(&queue->child_list); 9793 for (x = 0, total_qe_count = 0; x < queue->page_count; x++) { 9794 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 9795 if (!dmabuf) 9796 goto out_fail; 9797 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 9798 hw_page_size, &dmabuf->phys, 9799 GFP_KERNEL); 9800 if (!dmabuf->virt) { 9801 kfree(dmabuf); 9802 goto out_fail; 9803 } 9804 memset(dmabuf->virt, 0, hw_page_size); 9805 dmabuf->buffer_tag = x; 9806 list_add_tail(&dmabuf->list, &queue->page_list); 9807 /* initialize queue's entry array */ 9808 dma_pointer = dmabuf->virt; 9809 for (; total_qe_count < entry_count && 9810 dma_pointer < (hw_page_size + dmabuf->virt); 9811 total_qe_count++, dma_pointer += entry_size) { 9812 queue->qe[total_qe_count].address = dma_pointer; 9813 } 9814 } 9815 queue->entry_size = entry_size; 9816 queue->entry_count = entry_count; 9817 queue->phba = phba; 9818 9819 return queue; 9820 out_fail: 9821 lpfc_sli4_queue_free(queue); 9822 return NULL; 9823 } 9824 9825 /** 9826 * lpfc_eq_create - Create an Event Queue on the HBA 9827 * @phba: HBA structure that indicates port to create a queue on. 9828 * @eq: The queue structure to use to create the event queue. 
9829 * @imax: The maximum interrupt per second limit. 9830 * 9831 * This function creates an event queue, as detailed in @eq, on a port, 9832 * described by @phba by sending an EQ_CREATE mailbox command to the HBA. 9833 * 9834 * The @phba struct is used to send mailbox command to HBA. The @eq struct 9835 * is used to get the entry count and entry size that are necessary to 9836 * determine the number of pages to allocate and use for this queue. This 9837 * function will send the EQ_CREATE mailbox command to the HBA to setup the 9838 * event queue. This function is asynchronous and will wait for the mailbox 9839 * command to finish before continuing. 9840 * 9841 * On success this function will return a zero. If unable to allocate enough 9842 * memory this function will return -ENOMEM. If the queue create mailbox command 9843 * fails this function will return -ENXIO. 9844 **/ 9845 uint32_t 9846 lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax) 9847 { 9848 struct lpfc_mbx_eq_create *eq_create; 9849 LPFC_MBOXQ_t *mbox; 9850 int rc, length, status = 0; 9851 struct lpfc_dmabuf *dmabuf; 9852 uint32_t shdr_status, shdr_add_status; 9853 union lpfc_sli4_cfg_shdr *shdr; 9854 uint16_t dmult; 9855 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 9856 9857 if (!phba->sli4_hba.pc_sli4_params.supported) 9858 hw_page_size = SLI4_PAGE_SIZE; 9859 9860 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 9861 if (!mbox) 9862 return -ENOMEM; 9863 length = (sizeof(struct lpfc_mbx_eq_create) - 9864 sizeof(struct lpfc_sli4_cfg_mhdr)); 9865 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 9866 LPFC_MBOX_OPCODE_EQ_CREATE, 9867 length, LPFC_SLI4_MBX_EMBED); 9868 eq_create = &mbox->u.mqe.un.eq_create; 9869 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request, 9870 eq->page_count); 9871 bf_set(lpfc_eq_context_size, &eq_create->u.request.context, 9872 LPFC_EQE_SIZE); 9873 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1); 9874 /* Calculate delay multiper from maximum interrupt per second */ 9875 dmult = LPFC_DMULT_CONST/imax - 1; 9876 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context, 9877 dmult); 9878 switch (eq->entry_count) { 9879 default: 9880 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9881 "0360 Unsupported EQ count. 
(%d)\n", 9882 eq->entry_count); 9883 if (eq->entry_count < 256) 9884 return -EINVAL; 9885 /* otherwise default to smallest count (drop through) */ 9886 case 256: 9887 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 9888 LPFC_EQ_CNT_256); 9889 break; 9890 case 512: 9891 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 9892 LPFC_EQ_CNT_512); 9893 break; 9894 case 1024: 9895 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 9896 LPFC_EQ_CNT_1024); 9897 break; 9898 case 2048: 9899 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 9900 LPFC_EQ_CNT_2048); 9901 break; 9902 case 4096: 9903 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 9904 LPFC_EQ_CNT_4096); 9905 break; 9906 } 9907 list_for_each_entry(dmabuf, &eq->page_list, list) { 9908 memset(dmabuf->virt, 0, hw_page_size); 9909 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 9910 putPaddrLow(dmabuf->phys); 9911 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 9912 putPaddrHigh(dmabuf->phys); 9913 } 9914 mbox->vport = phba->pport; 9915 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 9916 mbox->context1 = NULL; 9917 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 9918 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr; 9919 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 9920 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 9921 if (shdr_status || shdr_add_status || rc) { 9922 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9923 "2500 EQ_CREATE mailbox failed with " 9924 "status x%x add_status x%x, mbx status x%x\n", 9925 shdr_status, shdr_add_status, rc); 9926 status = -ENXIO; 9927 } 9928 eq->type = LPFC_EQ; 9929 eq->subtype = LPFC_NONE; 9930 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response); 9931 if (eq->queue_id == 0xFFFF) 9932 status = -ENXIO; 9933 eq->host_index = 0; 9934 eq->hba_index = 0; 9935 9936 mempool_free(mbox, phba->mbox_mem_pool); 9937 return status; 9938 } 9939 9940 /** 9941 * lpfc_cq_create - Create a Completion Queue on the HBA 9942 * @phba: HBA structure that indicates port to create a queue on. 9943 * @cq: The queue structure to use to create the completion queue. 9944 * @eq: The event queue to bind this completion queue to. 9945 * 9946 * This function creates a completion queue, as detailed in @wq, on a port, 9947 * described by @phba by sending a CQ_CREATE mailbox command to the HBA. 9948 * 9949 * The @phba struct is used to send mailbox command to HBA. The @cq struct 9950 * is used to get the entry count and entry size that are necessary to 9951 * determine the number of pages to allocate and use for this queue. The @eq 9952 * is used to indicate which event queue to bind this completion queue to. This 9953 * function will send the CQ_CREATE mailbox command to the HBA to setup the 9954 * completion queue. This function is asynchronous and will wait for the mailbox 9955 * command to finish before continuing. 9956 * 9957 * On success this function will return a zero. If unable to allocate enough 9958 * memory this function will return -ENOMEM. If the queue create mailbox command 9959 * fails this function will return -ENXIO. 
9960 **/ 9961 uint32_t 9962 lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, 9963 struct lpfc_queue *eq, uint32_t type, uint32_t subtype) 9964 { 9965 struct lpfc_mbx_cq_create *cq_create; 9966 struct lpfc_dmabuf *dmabuf; 9967 LPFC_MBOXQ_t *mbox; 9968 int rc, length, status = 0; 9969 uint32_t shdr_status, shdr_add_status; 9970 union lpfc_sli4_cfg_shdr *shdr; 9971 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 9972 9973 if (!phba->sli4_hba.pc_sli4_params.supported) 9974 hw_page_size = SLI4_PAGE_SIZE; 9975 9976 9977 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 9978 if (!mbox) 9979 return -ENOMEM; 9980 length = (sizeof(struct lpfc_mbx_cq_create) - 9981 sizeof(struct lpfc_sli4_cfg_mhdr)); 9982 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 9983 LPFC_MBOX_OPCODE_CQ_CREATE, 9984 length, LPFC_SLI4_MBX_EMBED); 9985 cq_create = &mbox->u.mqe.un.cq_create; 9986 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request, 9987 cq->page_count); 9988 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1); 9989 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1); 9990 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, eq->queue_id); 9991 switch (cq->entry_count) { 9992 default: 9993 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9994 "0361 Unsupported CQ count. (%d)\n", 9995 cq->entry_count); 9996 if (cq->entry_count < 256) 9997 return -EINVAL; 9998 /* otherwise default to smallest count (drop through) */ 9999 case 256: 10000 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 10001 LPFC_CQ_CNT_256); 10002 break; 10003 case 512: 10004 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 10005 LPFC_CQ_CNT_512); 10006 break; 10007 case 1024: 10008 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 10009 LPFC_CQ_CNT_1024); 10010 break; 10011 } 10012 list_for_each_entry(dmabuf, &cq->page_list, list) { 10013 memset(dmabuf->virt, 0, hw_page_size); 10014 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 10015 putPaddrLow(dmabuf->phys); 10016 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 10017 putPaddrHigh(dmabuf->phys); 10018 } 10019 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 10020 10021 /* The IOCTL status is embedded in the mailbox subheader. */ 10022 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr; 10023 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 10024 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 10025 if (shdr_status || shdr_add_status || rc) { 10026 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10027 "2501 CQ_CREATE mailbox failed with " 10028 "status x%x add_status x%x, mbx status x%x\n", 10029 shdr_status, shdr_add_status, rc); 10030 status = -ENXIO; 10031 goto out; 10032 } 10033 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response); 10034 if (cq->queue_id == 0xFFFF) { 10035 status = -ENXIO; 10036 goto out; 10037 } 10038 /* link the cq onto the parent eq child list */ 10039 list_add_tail(&cq->list, &eq->child_list); 10040 /* Set up completion queue's type and subtype */ 10041 cq->type = type; 10042 cq->subtype = subtype; 10043 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response); 10044 cq->host_index = 0; 10045 cq->hba_index = 0; 10046 10047 out: 10048 mempool_free(mbox, phba->mbox_mem_pool); 10049 return status; 10050 } 10051 10052 /** 10053 * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration 10054 * @phba: HBA structure that indicates port to create a queue on. 
10055 * @mq: The queue structure to use to create the mailbox queue. 10056 * @mbox: An allocated pointer to type LPFC_MBOXQ_t 10057 * @cq: The completion queue to associate with this cq. 10058 * 10059 * This function provides failback (fb) functionality when the 10060 * mq_create_ext fails on older FW generations. It's purpose is identical 10061 * to mq_create_ext otherwise. 10062 * 10063 * This routine cannot fail as all attributes were previously accessed and 10064 * initialized in mq_create_ext. 10065 **/ 10066 static void 10067 lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq, 10068 LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq) 10069 { 10070 struct lpfc_mbx_mq_create *mq_create; 10071 struct lpfc_dmabuf *dmabuf; 10072 int length; 10073 10074 length = (sizeof(struct lpfc_mbx_mq_create) - 10075 sizeof(struct lpfc_sli4_cfg_mhdr)); 10076 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 10077 LPFC_MBOX_OPCODE_MQ_CREATE, 10078 length, LPFC_SLI4_MBX_EMBED); 10079 mq_create = &mbox->u.mqe.un.mq_create; 10080 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request, 10081 mq->page_count); 10082 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context, 10083 cq->queue_id); 10084 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1); 10085 switch (mq->entry_count) { 10086 case 16: 10087 bf_set(lpfc_mq_context_count, &mq_create->u.request.context, 10088 LPFC_MQ_CNT_16); 10089 break; 10090 case 32: 10091 bf_set(lpfc_mq_context_count, &mq_create->u.request.context, 10092 LPFC_MQ_CNT_32); 10093 break; 10094 case 64: 10095 bf_set(lpfc_mq_context_count, &mq_create->u.request.context, 10096 LPFC_MQ_CNT_64); 10097 break; 10098 case 128: 10099 bf_set(lpfc_mq_context_count, &mq_create->u.request.context, 10100 LPFC_MQ_CNT_128); 10101 break; 10102 } 10103 list_for_each_entry(dmabuf, &mq->page_list, list) { 10104 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 10105 putPaddrLow(dmabuf->phys); 10106 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 10107 putPaddrHigh(dmabuf->phys); 10108 } 10109 } 10110 10111 /** 10112 * lpfc_mq_create - Create a mailbox Queue on the HBA 10113 * @phba: HBA structure that indicates port to create a queue on. 10114 * @mq: The queue structure to use to create the mailbox queue. 10115 * @cq: The completion queue to associate with this cq. 10116 * @subtype: The queue's subtype. 10117 * 10118 * This function creates a mailbox queue, as detailed in @mq, on a port, 10119 * described by @phba by sending a MQ_CREATE mailbox command to the HBA. 10120 * 10121 * The @phba struct is used to send mailbox command to HBA. The @cq struct 10122 * is used to get the entry count and entry size that are necessary to 10123 * determine the number of pages to allocate and use for this queue. This 10124 * function will send the MQ_CREATE mailbox command to the HBA to setup the 10125 * mailbox queue. This function is asynchronous and will wait for the mailbox 10126 * command to finish before continuing. 10127 * 10128 * On success this function will return a zero. If unable to allocate enough 10129 * memory this function will return -ENOMEM. If the queue create mailbox command 10130 * fails this function will return -ENXIO. 
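 *
 * Illustrative call (a sketch only; the LPFC_MBOX subtype is assumed here as
 * the usual choice for the mailbox queue):
 *
 *   rc = lpfc_mq_create(phba, mq, cq, LPFC_MBOX);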
10131 **/ 10132 int32_t 10133 lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, 10134 struct lpfc_queue *cq, uint32_t subtype) 10135 { 10136 struct lpfc_mbx_mq_create *mq_create; 10137 struct lpfc_mbx_mq_create_ext *mq_create_ext; 10138 struct lpfc_dmabuf *dmabuf; 10139 LPFC_MBOXQ_t *mbox; 10140 int rc, length, status = 0; 10141 uint32_t shdr_status, shdr_add_status; 10142 union lpfc_sli4_cfg_shdr *shdr; 10143 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 10144 10145 if (!phba->sli4_hba.pc_sli4_params.supported) 10146 hw_page_size = SLI4_PAGE_SIZE; 10147 10148 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 10149 if (!mbox) 10150 return -ENOMEM; 10151 length = (sizeof(struct lpfc_mbx_mq_create_ext) - 10152 sizeof(struct lpfc_sli4_cfg_mhdr)); 10153 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 10154 LPFC_MBOX_OPCODE_MQ_CREATE_EXT, 10155 length, LPFC_SLI4_MBX_EMBED); 10156 10157 mq_create_ext = &mbox->u.mqe.un.mq_create_ext; 10158 bf_set(lpfc_mbx_mq_create_ext_num_pages, &mq_create_ext->u.request, 10159 mq->page_count); 10160 bf_set(lpfc_mbx_mq_create_ext_async_evt_link, &mq_create_ext->u.request, 10161 1); 10162 bf_set(lpfc_mbx_mq_create_ext_async_evt_fcfste, 10163 &mq_create_ext->u.request, 1); 10164 bf_set(lpfc_mbx_mq_create_ext_async_evt_group5, 10165 &mq_create_ext->u.request, 1); 10166 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context, 10167 cq->queue_id); 10168 bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1); 10169 switch (mq->entry_count) { 10170 default: 10171 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 10172 "0362 Unsupported MQ count. (%d)\n", 10173 mq->entry_count); 10174 if (mq->entry_count < 16) 10175 return -EINVAL; 10176 /* otherwise default to smallest count (drop through) */ 10177 case 16: 10178 bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context, 10179 LPFC_MQ_CNT_16); 10180 break; 10181 case 32: 10182 bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context, 10183 LPFC_MQ_CNT_32); 10184 break; 10185 case 64: 10186 bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context, 10187 LPFC_MQ_CNT_64); 10188 break; 10189 case 128: 10190 bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context, 10191 LPFC_MQ_CNT_128); 10192 break; 10193 } 10194 list_for_each_entry(dmabuf, &mq->page_list, list) { 10195 memset(dmabuf->virt, 0, hw_page_size); 10196 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo = 10197 putPaddrLow(dmabuf->phys); 10198 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi = 10199 putPaddrHigh(dmabuf->phys); 10200 } 10201 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 10202 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr; 10203 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, 10204 &mq_create_ext->u.response); 10205 if (rc != MBX_SUCCESS) { 10206 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10207 "2795 MQ_CREATE_EXT failed with " 10208 "status x%x. Failback to MQ_CREATE.\n", 10209 rc); 10210 lpfc_mq_create_fb_init(phba, mq, mbox, cq); 10211 mq_create = &mbox->u.mqe.un.mq_create; 10212 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 10213 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr; 10214 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, 10215 &mq_create->u.response); 10216 } 10217 10218 /* The IOCTL status is embedded in the mailbox subheader. 
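 * Note that shdr points at whichever command was actually issued:
 * MQ_CREATE_EXT, or the plain MQ_CREATE fallback path.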
*/ 10219 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 10220 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 10221 if (shdr_status || shdr_add_status || rc) { 10222 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10223 "2502 MQ_CREATE mailbox failed with " 10224 "status x%x add_status x%x, mbx status x%x\n", 10225 shdr_status, shdr_add_status, rc); 10226 status = -ENXIO; 10227 goto out; 10228 } 10229 if (mq->queue_id == 0xFFFF) { 10230 status = -ENXIO; 10231 goto out; 10232 } 10233 mq->type = LPFC_MQ; 10234 mq->subtype = subtype; 10235 mq->host_index = 0; 10236 mq->hba_index = 0; 10237 10238 /* link the mq onto the parent cq child list */ 10239 list_add_tail(&mq->list, &cq->child_list); 10240 out: 10241 mempool_free(mbox, phba->mbox_mem_pool); 10242 return status; 10243 } 10244 10245 /** 10246 * lpfc_wq_create - Create a Work Queue on the HBA 10247 * @phba: HBA structure that indicates port to create a queue on. 10248 * @wq: The queue structure to use to create the work queue. 10249 * @cq: The completion queue to bind this work queue to. 10250 * @subtype: The subtype of the work queue indicating its functionality. 10251 * 10252 * This function creates a work queue, as detailed in @wq, on a port, described 10253 * by @phba by sending a WQ_CREATE mailbox command to the HBA. 10254 * 10255 * The @phba struct is used to send mailbox command to HBA. The @wq struct 10256 * is used to get the entry count and entry size that are necessary to 10257 * determine the number of pages to allocate and use for this queue. The @cq 10258 * is used to indicate which completion queue to bind this work queue to. This 10259 * function will send the WQ_CREATE mailbox command to the HBA to setup the 10260 * work queue. This function is asynchronous and will wait for the mailbox 10261 * command to finish before continuing. 10262 * 10263 * On success this function will return a zero. If unable to allocate enough 10264 * memory this function will return -ENOMEM. If the queue create mailbox command 10265 * fails this function will return -ENXIO. 
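 *
 * Example (illustrative sketch only; els_wq, els_cq and the LPFC_ELS subtype
 * are assumptions based on how the driver typically names its ELS queues):
 *
 *	rc = lpfc_wq_create(phba, els_wq, els_cq, LPFC_ELS);
 *	if (rc)
 *		goto out_destroy_els_cq;	(unwind in reverse creation order)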
10266 **/ 10267 uint32_t 10268 lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, 10269 struct lpfc_queue *cq, uint32_t subtype) 10270 { 10271 struct lpfc_mbx_wq_create *wq_create; 10272 struct lpfc_dmabuf *dmabuf; 10273 LPFC_MBOXQ_t *mbox; 10274 int rc, length, status = 0; 10275 uint32_t shdr_status, shdr_add_status; 10276 union lpfc_sli4_cfg_shdr *shdr; 10277 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 10278 10279 if (!phba->sli4_hba.pc_sli4_params.supported) 10280 hw_page_size = SLI4_PAGE_SIZE; 10281 10282 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 10283 if (!mbox) 10284 return -ENOMEM; 10285 length = (sizeof(struct lpfc_mbx_wq_create) - 10286 sizeof(struct lpfc_sli4_cfg_mhdr)); 10287 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 10288 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE, 10289 length, LPFC_SLI4_MBX_EMBED); 10290 wq_create = &mbox->u.mqe.un.wq_create; 10291 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request, 10292 wq->page_count); 10293 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request, 10294 cq->queue_id); 10295 list_for_each_entry(dmabuf, &wq->page_list, list) { 10296 memset(dmabuf->virt, 0, hw_page_size); 10297 wq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 10298 putPaddrLow(dmabuf->phys); 10299 wq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 10300 putPaddrHigh(dmabuf->phys); 10301 } 10302 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 10303 /* The IOCTL status is embedded in the mailbox subheader. */ 10304 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr; 10305 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 10306 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 10307 if (shdr_status || shdr_add_status || rc) { 10308 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10309 "2503 WQ_CREATE mailbox failed with " 10310 "status x%x add_status x%x, mbx status x%x\n", 10311 shdr_status, shdr_add_status, rc); 10312 status = -ENXIO; 10313 goto out; 10314 } 10315 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, &wq_create->u.response); 10316 if (wq->queue_id == 0xFFFF) { 10317 status = -ENXIO; 10318 goto out; 10319 } 10320 wq->type = LPFC_WQ; 10321 wq->subtype = subtype; 10322 wq->host_index = 0; 10323 wq->hba_index = 0; 10324 10325 /* link the wq onto the parent cq child list */ 10326 list_add_tail(&wq->list, &cq->child_list); 10327 out: 10328 mempool_free(mbox, phba->mbox_mem_pool); 10329 return status; 10330 } 10331 10332 /** 10333 * lpfc_rq_create - Create a Receive Queue on the HBA 10334 * @phba: HBA structure that indicates port to create a queue on. 10335 * @hrq: The queue structure to use to create the header receive queue. 10336 * @drq: The queue structure to use to create the data receive queue. 10337 * @cq: The completion queue to bind this work queue to. 10338 * 10339 * This function creates a receive buffer queue pair , as detailed in @hrq and 10340 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command 10341 * to the HBA. 10342 * 10343 * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq 10344 * struct is used to get the entry count that is necessary to determine the 10345 * number of pages to use for this queue. The @cq is used to indicate which 10346 * completion queue to bind received buffers that are posted to these queues to. 10347 * This function will send the RQ_CREATE mailbox command to the HBA to setup the 10348 * receive queue pair. 
This function is asynchronous and will wait for the 10349 * mailbox command to finish before continuing. 10350 * 10351 * On success this function will return a zero. If unable to allocate enough 10352 * memory this function will return -ENOMEM. If the queue create mailbox command 10353 * fails this function will return -ENXIO. 10354 **/ 10355 uint32_t 10356 lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, 10357 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype) 10358 { 10359 struct lpfc_mbx_rq_create *rq_create; 10360 struct lpfc_dmabuf *dmabuf; 10361 LPFC_MBOXQ_t *mbox; 10362 int rc, length, status = 0; 10363 uint32_t shdr_status, shdr_add_status; 10364 union lpfc_sli4_cfg_shdr *shdr; 10365 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 10366 10367 if (!phba->sli4_hba.pc_sli4_params.supported) 10368 hw_page_size = SLI4_PAGE_SIZE; 10369 10370 if (hrq->entry_count != drq->entry_count) 10371 return -EINVAL; 10372 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 10373 if (!mbox) 10374 return -ENOMEM; 10375 length = (sizeof(struct lpfc_mbx_rq_create) - 10376 sizeof(struct lpfc_sli4_cfg_mhdr)); 10377 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 10378 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, 10379 length, LPFC_SLI4_MBX_EMBED); 10380 rq_create = &mbox->u.mqe.un.rq_create; 10381 switch (hrq->entry_count) { 10382 default: 10383 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 10384 "2535 Unsupported RQ count. (%d)\n", 10385 hrq->entry_count); 10386 if (hrq->entry_count < 512) 10387 return -EINVAL; 10388 /* otherwise default to smallest count (drop through) */ 10389 case 512: 10390 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, 10391 LPFC_RQ_RING_SIZE_512); 10392 break; 10393 case 1024: 10394 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, 10395 LPFC_RQ_RING_SIZE_1024); 10396 break; 10397 case 2048: 10398 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, 10399 LPFC_RQ_RING_SIZE_2048); 10400 break; 10401 case 4096: 10402 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, 10403 LPFC_RQ_RING_SIZE_4096); 10404 break; 10405 } 10406 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, 10407 cq->queue_id); 10408 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, 10409 hrq->page_count); 10410 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, 10411 LPFC_HDR_BUF_SIZE); 10412 list_for_each_entry(dmabuf, &hrq->page_list, list) { 10413 memset(dmabuf->virt, 0, hw_page_size); 10414 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 10415 putPaddrLow(dmabuf->phys); 10416 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 10417 putPaddrHigh(dmabuf->phys); 10418 } 10419 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 10420 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 10421 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; 10422 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 10423 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 10424 if (shdr_status || shdr_add_status || rc) { 10425 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10426 "2504 RQ_CREATE mailbox failed with " 10427 "status x%x add_status x%x, mbx status x%x\n", 10428 shdr_status, shdr_add_status, rc); 10429 status = -ENXIO; 10430 goto out; 10431 } 10432 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); 10433 if (hrq->queue_id == 0xFFFF) { 10434 status = -ENXIO; 10435 goto out; 10436 } 10437 hrq->type = LPFC_HRQ; 10438 hrq->subtype = subtype; 10439 hrq->host_index = 0; 10440 hrq->hba_index = 0; 10441 10442 /* now create the data queue */ 10443 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 10444 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, 10445 length, LPFC_SLI4_MBX_EMBED); 10446 switch (drq->entry_count) { 10447 default: 10448 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 10449 "2536 Unsupported RQ count. (%d)\n", 10450 drq->entry_count); 10451 if (drq->entry_count < 512) 10452 return -EINVAL; 10453 /* otherwise default to smallest count (drop through) */ 10454 case 512: 10455 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, 10456 LPFC_RQ_RING_SIZE_512); 10457 break; 10458 case 1024: 10459 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, 10460 LPFC_RQ_RING_SIZE_1024); 10461 break; 10462 case 2048: 10463 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, 10464 LPFC_RQ_RING_SIZE_2048); 10465 break; 10466 case 4096: 10467 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, 10468 LPFC_RQ_RING_SIZE_4096); 10469 break; 10470 } 10471 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, 10472 cq->queue_id); 10473 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, 10474 drq->page_count); 10475 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, 10476 LPFC_DATA_BUF_SIZE); 10477 list_for_each_entry(dmabuf, &drq->page_list, list) { 10478 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 10479 putPaddrLow(dmabuf->phys); 10480 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 10481 putPaddrHigh(dmabuf->phys); 10482 } 10483 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 10484 /* The IOCTL status is embedded in the mailbox subheader. */ 10485 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; 10486 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 10487 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 10488 if (shdr_status || shdr_add_status || rc) { 10489 status = -ENXIO; 10490 goto out; 10491 } 10492 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); 10493 if (drq->queue_id == 0xFFFF) { 10494 status = -ENXIO; 10495 goto out; 10496 } 10497 drq->type = LPFC_DRQ; 10498 drq->subtype = subtype; 10499 drq->host_index = 0; 10500 drq->hba_index = 0; 10501 10502 /* link the header and data RQs onto the parent cq child list */ 10503 list_add_tail(&hrq->list, &cq->child_list); 10504 list_add_tail(&drq->list, &cq->child_list); 10505 10506 out: 10507 mempool_free(mbox, phba->mbox_mem_pool); 10508 return status; 10509 } 10510 10511 /** 10512 * lpfc_eq_destroy - Destroy an event Queue on the HBA 10513 * @eq: The queue structure associated with the queue to destroy. 
10514 * 10515 * This function destroys a queue, as detailed in @eq by sending an mailbox 10516 * command, specific to the type of queue, to the HBA. 10517 * 10518 * The @eq struct is used to get the queue ID of the queue to destroy. 10519 * 10520 * On success this function will return a zero. If the queue destroy mailbox 10521 * command fails this function will return -ENXIO. 10522 **/ 10523 uint32_t 10524 lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq) 10525 { 10526 LPFC_MBOXQ_t *mbox; 10527 int rc, length, status = 0; 10528 uint32_t shdr_status, shdr_add_status; 10529 union lpfc_sli4_cfg_shdr *shdr; 10530 10531 if (!eq) 10532 return -ENODEV; 10533 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL); 10534 if (!mbox) 10535 return -ENOMEM; 10536 length = (sizeof(struct lpfc_mbx_eq_destroy) - 10537 sizeof(struct lpfc_sli4_cfg_mhdr)); 10538 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 10539 LPFC_MBOX_OPCODE_EQ_DESTROY, 10540 length, LPFC_SLI4_MBX_EMBED); 10541 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request, 10542 eq->queue_id); 10543 mbox->vport = eq->phba->pport; 10544 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 10545 10546 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL); 10547 /* The IOCTL status is embedded in the mailbox subheader. */ 10548 shdr = (union lpfc_sli4_cfg_shdr *) 10549 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr; 10550 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 10551 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 10552 if (shdr_status || shdr_add_status || rc) { 10553 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10554 "2505 EQ_DESTROY mailbox failed with " 10555 "status x%x add_status x%x, mbx status x%x\n", 10556 shdr_status, shdr_add_status, rc); 10557 status = -ENXIO; 10558 } 10559 10560 /* Remove eq from any list */ 10561 list_del_init(&eq->list); 10562 mempool_free(mbox, eq->phba->mbox_mem_pool); 10563 return status; 10564 } 10565 10566 /** 10567 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA 10568 * @cq: The queue structure associated with the queue to destroy. 10569 * 10570 * This function destroys a queue, as detailed in @cq by sending an mailbox 10571 * command, specific to the type of queue, to the HBA. 10572 * 10573 * The @cq struct is used to get the queue ID of the queue to destroy. 10574 * 10575 * On success this function will return a zero. If the queue destroy mailbox 10576 * command fails this function will return -ENXIO. 10577 **/ 10578 uint32_t 10579 lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq) 10580 { 10581 LPFC_MBOXQ_t *mbox; 10582 int rc, length, status = 0; 10583 uint32_t shdr_status, shdr_add_status; 10584 union lpfc_sli4_cfg_shdr *shdr; 10585 10586 if (!cq) 10587 return -ENODEV; 10588 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL); 10589 if (!mbox) 10590 return -ENOMEM; 10591 length = (sizeof(struct lpfc_mbx_cq_destroy) - 10592 sizeof(struct lpfc_sli4_cfg_mhdr)); 10593 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 10594 LPFC_MBOX_OPCODE_CQ_DESTROY, 10595 length, LPFC_SLI4_MBX_EMBED); 10596 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request, 10597 cq->queue_id); 10598 mbox->vport = cq->phba->pport; 10599 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 10600 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL); 10601 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 10602 shdr = (union lpfc_sli4_cfg_shdr *) 10603 &mbox->u.mqe.un.wq_create.header.cfg_shdr; 10604 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 10605 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 10606 if (shdr_status || shdr_add_status || rc) { 10607 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10608 "2506 CQ_DESTROY mailbox failed with " 10609 "status x%x add_status x%x, mbx status x%x\n", 10610 shdr_status, shdr_add_status, rc); 10611 status = -ENXIO; 10612 } 10613 /* Remove cq from any list */ 10614 list_del_init(&cq->list); 10615 mempool_free(mbox, cq->phba->mbox_mem_pool); 10616 return status; 10617 } 10618 10619 /** 10620 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA 10621 * @qm: The queue structure associated with the queue to destroy. 10622 * 10623 * This function destroys a queue, as detailed in @mq by sending an mailbox 10624 * command, specific to the type of queue, to the HBA. 10625 * 10626 * The @mq struct is used to get the queue ID of the queue to destroy. 10627 * 10628 * On success this function will return a zero. If the queue destroy mailbox 10629 * command fails this function will return -ENXIO. 10630 **/ 10631 uint32_t 10632 lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq) 10633 { 10634 LPFC_MBOXQ_t *mbox; 10635 int rc, length, status = 0; 10636 uint32_t shdr_status, shdr_add_status; 10637 union lpfc_sli4_cfg_shdr *shdr; 10638 10639 if (!mq) 10640 return -ENODEV; 10641 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL); 10642 if (!mbox) 10643 return -ENOMEM; 10644 length = (sizeof(struct lpfc_mbx_mq_destroy) - 10645 sizeof(struct lpfc_sli4_cfg_mhdr)); 10646 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 10647 LPFC_MBOX_OPCODE_MQ_DESTROY, 10648 length, LPFC_SLI4_MBX_EMBED); 10649 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request, 10650 mq->queue_id); 10651 mbox->vport = mq->phba->pport; 10652 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 10653 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL); 10654 /* The IOCTL status is embedded in the mailbox subheader. */ 10655 shdr = (union lpfc_sli4_cfg_shdr *) 10656 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr; 10657 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 10658 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 10659 if (shdr_status || shdr_add_status || rc) { 10660 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10661 "2507 MQ_DESTROY mailbox failed with " 10662 "status x%x add_status x%x, mbx status x%x\n", 10663 shdr_status, shdr_add_status, rc); 10664 status = -ENXIO; 10665 } 10666 /* Remove mq from any list */ 10667 list_del_init(&mq->list); 10668 mempool_free(mbox, mq->phba->mbox_mem_pool); 10669 return status; 10670 } 10671 10672 /** 10673 * lpfc_wq_destroy - Destroy a Work Queue on the HBA 10674 * @wq: The queue structure associated with the queue to destroy. 10675 * 10676 * This function destroys a queue, as detailed in @wq by sending an mailbox 10677 * command, specific to the type of queue, to the HBA. 10678 * 10679 * The @wq struct is used to get the queue ID of the queue to destroy. 10680 * 10681 * On success this function will return a zero. If the queue destroy mailbox 10682 * command fails this function will return -ENXIO. 
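 *
 * Example (illustrative teardown sketch; the queue names are assumptions).
 * Because a work queue is linked onto its parent completion queue's child
 * list at create time, the work queue is destroyed before the completion
 * queue it was bound to:
 *
 *	lpfc_wq_destroy(phba, els_wq);
 *	lpfc_cq_destroy(phba, els_cq);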
10683 **/ 10684 uint32_t 10685 lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq) 10686 { 10687 LPFC_MBOXQ_t *mbox; 10688 int rc, length, status = 0; 10689 uint32_t shdr_status, shdr_add_status; 10690 union lpfc_sli4_cfg_shdr *shdr; 10691 10692 if (!wq) 10693 return -ENODEV; 10694 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL); 10695 if (!mbox) 10696 return -ENOMEM; 10697 length = (sizeof(struct lpfc_mbx_wq_destroy) - 10698 sizeof(struct lpfc_sli4_cfg_mhdr)); 10699 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 10700 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY, 10701 length, LPFC_SLI4_MBX_EMBED); 10702 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request, 10703 wq->queue_id); 10704 mbox->vport = wq->phba->pport; 10705 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 10706 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL); 10707 shdr = (union lpfc_sli4_cfg_shdr *) 10708 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr; 10709 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 10710 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 10711 if (shdr_status || shdr_add_status || rc) { 10712 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10713 "2508 WQ_DESTROY mailbox failed with " 10714 "status x%x add_status x%x, mbx status x%x\n", 10715 shdr_status, shdr_add_status, rc); 10716 status = -ENXIO; 10717 } 10718 /* Remove wq from any list */ 10719 list_del_init(&wq->list); 10720 mempool_free(mbox, wq->phba->mbox_mem_pool); 10721 return status; 10722 } 10723 10724 /** 10725 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA 10726 * @rq: The queue structure associated with the queue to destroy. 10727 * 10728 * This function destroys a queue, as detailed in @rq by sending an mailbox 10729 * command, specific to the type of queue, to the HBA. 10730 * 10731 * The @rq struct is used to get the queue ID of the queue to destroy. 10732 * 10733 * On success this function will return a zero. If the queue destroy mailbox 10734 * command fails this function will return -ENXIO. 10735 **/ 10736 uint32_t 10737 lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq, 10738 struct lpfc_queue *drq) 10739 { 10740 LPFC_MBOXQ_t *mbox; 10741 int rc, length, status = 0; 10742 uint32_t shdr_status, shdr_add_status; 10743 union lpfc_sli4_cfg_shdr *shdr; 10744 10745 if (!hrq || !drq) 10746 return -ENODEV; 10747 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL); 10748 if (!mbox) 10749 return -ENOMEM; 10750 length = (sizeof(struct lpfc_mbx_rq_destroy) - 10751 sizeof(struct mbox_header)); 10752 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 10753 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY, 10754 length, LPFC_SLI4_MBX_EMBED); 10755 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request, 10756 hrq->queue_id); 10757 mbox->vport = hrq->phba->pport; 10758 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 10759 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL); 10760 /* The IOCTL status is embedded in the mailbox subheader. 
*/
10761 shdr = (union lpfc_sli4_cfg_shdr *)
10762 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
10763 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10764 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10765 if (shdr_status || shdr_add_status || rc) {
10766 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10767 "2509 RQ_DESTROY mailbox failed with "
10768 "status x%x add_status x%x, mbx status x%x\n",
10769 shdr_status, shdr_add_status, rc);
10770 if (rc != MBX_TIMEOUT)
10771 mempool_free(mbox, hrq->phba->mbox_mem_pool);
10772 return -ENXIO;
10773 }
10774 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
10775 drq->queue_id);
10776 rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
10777 shdr = (union lpfc_sli4_cfg_shdr *)
10778 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
10779 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10780 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10781 if (shdr_status || shdr_add_status || rc) {
10782 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10783 "2510 RQ_DESTROY mailbox failed with "
10784 "status x%x add_status x%x, mbx status x%x\n",
10785 shdr_status, shdr_add_status, rc);
10786 status = -ENXIO;
10787 }
10788 list_del_init(&hrq->list);
10789 list_del_init(&drq->list);
10790 mempool_free(mbox, hrq->phba->mbox_mem_pool);
10791 return status;
10792 }
10793
10794 /**
10795 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
10796 * @phba: pointer to lpfc hba data structure.
10797 * @pdma_phys_addr0: Physical address of the 1st SGL page.
10798 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
10799 * @xritag: the xritag that ties this io to the SGL pages.
10800 *
10801 * This routine will post the sgl pages for the IO that has the xritag
10802 * that is in the iocbq structure. The xritag is assigned during iocbq
10803 * creation and persists for as long as the driver is loaded.
10804 * If the caller has fewer than 256 scatter gather segments to map then
10805 * pdma_phys_addr1 should be 0.
10806 * If the caller needs to map more than 256 scatter gather segments then
10807 * pdma_phys_addr1 should be a valid physical address.
10808 * Physical addresses for SGLs must be 64 byte aligned.
10809 * If you are going to map 2 SGLs then the first one must have 256 entries
10810 * and the second SGL can have between 1 and 256 entries.
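 *
 * Example (illustrative sketch only; a single-page SGL is posted, so the
 * second physical address is 0, and @sglq is assumed to be a driver sglq
 * entry that already carries its DMA address and XRI tag):
 *
 *	rc = lpfc_sli4_post_sgl(phba, sglq->phys, 0, sglq->sli4_xritag);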
10811 *
10812 * Return codes:
10813 * 0 - Success
10814 * -ENXIO, -ENOMEM - Failure
10815 **/
10816 int
10817 lpfc_sli4_post_sgl(struct lpfc_hba *phba,
10818 dma_addr_t pdma_phys_addr0,
10819 dma_addr_t pdma_phys_addr1,
10820 uint16_t xritag)
10821 {
10822 struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
10823 LPFC_MBOXQ_t *mbox;
10824 int rc;
10825 uint32_t shdr_status, shdr_add_status;
10826 union lpfc_sli4_cfg_shdr *shdr;
10827
10828 if (xritag == NO_XRI) {
10829 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10830 "0364 Invalid param:\n");
10831 return -EINVAL;
10832 }
10833
10834 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10835 if (!mbox)
10836 return -ENOMEM;
10837
10838 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
10839 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
10840 sizeof(struct lpfc_mbx_post_sgl_pages) -
10841 sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED);
10842
10843 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
10844 &mbox->u.mqe.un.post_sgl_pages;
10845 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
10846 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
10847
10848 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
10849 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
10850 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
10851 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
10852
10853 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
10854 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
10855 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
10856 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
10857 if (!phba->sli4_hba.intr_enable)
10858 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
10859 else
10860 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
10861 /* The IOCTL status is embedded in the mailbox subheader. */
10862 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
10863 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10864 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10865 if (rc != MBX_TIMEOUT)
10866 mempool_free(mbox, phba->mbox_mem_pool);
10867 if (shdr_status || shdr_add_status || rc) {
10868 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10869 "2511 POST_SGL mailbox failed with "
10870 "status x%x add_status x%x, mbx status x%x\n",
10871 shdr_status, shdr_add_status, rc);
10872 rc = -ENXIO;
10873 }
10874 return rc;
10875 }
10876
10877 /**
10878 * lpfc_sli4_next_xritag - Get an xritag for the io
10879 * @phba: Pointer to HBA context object.
10880 *
10881 * This function gets an xritag for the iocb. The function returns the
10882 * allocated xritag if successful, else it will return 0xffff, which is
10883 * not a valid xritag.
10884 *
10885 * The caller is not required to hold any lock.
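 *
 * Example (illustrative sketch; NO_XRI (0xffff) is assumed to be the failure
 * sentinel described above, and the caller's return value is hypothetical):
 *
 *	xritag = lpfc_sli4_next_xritag(phba);
 *	if (xritag == NO_XRI)
 *		return NO_XRI;	(no free XRI; the caller fails the request)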
10886 **/ 10887 uint16_t 10888 lpfc_sli4_next_xritag(struct lpfc_hba *phba) 10889 { 10890 uint16_t xritag; 10891 10892 spin_lock_irq(&phba->hbalock); 10893 xritag = phba->sli4_hba.next_xri; 10894 if ((xritag != (uint16_t) -1) && xritag < 10895 (phba->sli4_hba.max_cfg_param.max_xri 10896 + phba->sli4_hba.max_cfg_param.xri_base)) { 10897 phba->sli4_hba.next_xri++; 10898 phba->sli4_hba.max_cfg_param.xri_used++; 10899 spin_unlock_irq(&phba->hbalock); 10900 return xritag; 10901 } 10902 spin_unlock_irq(&phba->hbalock); 10903 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 10904 "2004 Failed to allocate XRI.last XRITAG is %d" 10905 " Max XRI is %d, Used XRI is %d\n", 10906 phba->sli4_hba.next_xri, 10907 phba->sli4_hba.max_cfg_param.max_xri, 10908 phba->sli4_hba.max_cfg_param.xri_used); 10909 return -1; 10910 } 10911 10912 /** 10913 * lpfc_sli4_post_sgl_list - post a block of sgl list to the firmware. 10914 * @phba: pointer to lpfc hba data structure. 10915 * 10916 * This routine is invoked to post a block of driver's sgl pages to the 10917 * HBA using non-embedded mailbox command. No Lock is held. This routine 10918 * is only called when the driver is loading and after all IO has been 10919 * stopped. 10920 **/ 10921 int 10922 lpfc_sli4_post_sgl_list(struct lpfc_hba *phba) 10923 { 10924 struct lpfc_sglq *sglq_entry; 10925 struct lpfc_mbx_post_uembed_sgl_page1 *sgl; 10926 struct sgl_page_pairs *sgl_pg_pairs; 10927 void *viraddr; 10928 LPFC_MBOXQ_t *mbox; 10929 uint32_t reqlen, alloclen, pg_pairs; 10930 uint32_t mbox_tmo; 10931 uint16_t xritag_start = 0; 10932 int els_xri_cnt, rc = 0; 10933 uint32_t shdr_status, shdr_add_status; 10934 union lpfc_sli4_cfg_shdr *shdr; 10935 10936 /* The number of sgls to be posted */ 10937 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 10938 10939 reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) + 10940 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); 10941 if (reqlen > SLI4_PAGE_SIZE) { 10942 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 10943 "2559 Block sgl registration required DMA " 10944 "size (%d) great than a page\n", reqlen); 10945 return -ENOMEM; 10946 } 10947 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 10948 if (!mbox) { 10949 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10950 "2560 Failed to allocate mbox cmd memory\n"); 10951 return -ENOMEM; 10952 } 10953 10954 /* Allocate DMA memory and set up the non-embedded mailbox command */ 10955 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 10956 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen, 10957 LPFC_SLI4_MBX_NEMBED); 10958 10959 if (alloclen < reqlen) { 10960 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10961 "0285 Allocated DMA memory size (%d) is " 10962 "less than the requested DMA memory " 10963 "size (%d)\n", alloclen, reqlen); 10964 lpfc_sli4_mbox_cmd_free(phba, mbox); 10965 return -ENOMEM; 10966 } 10967 /* Get the first SGE entry from the non-embedded DMA memory */ 10968 viraddr = mbox->sge_array->addr[0]; 10969 10970 /* Set up the SGL pages in the non-embedded DMA pages */ 10971 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; 10972 sgl_pg_pairs = &sgl->sgl_pg_pairs; 10973 10974 for (pg_pairs = 0; pg_pairs < els_xri_cnt; pg_pairs++) { 10975 sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[pg_pairs]; 10976 /* Set up the sge entry */ 10977 sgl_pg_pairs->sgl_pg0_addr_lo = 10978 cpu_to_le32(putPaddrLow(sglq_entry->phys)); 10979 sgl_pg_pairs->sgl_pg0_addr_hi = 10980 cpu_to_le32(putPaddrHigh(sglq_entry->phys)); 10981 sgl_pg_pairs->sgl_pg1_addr_lo = 10982 
cpu_to_le32(putPaddrLow(0)); 10983 sgl_pg_pairs->sgl_pg1_addr_hi = 10984 cpu_to_le32(putPaddrHigh(0)); 10985 /* Keep the first xritag on the list */ 10986 if (pg_pairs == 0) 10987 xritag_start = sglq_entry->sli4_xritag; 10988 sgl_pg_pairs++; 10989 } 10990 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); 10991 bf_set(lpfc_post_sgl_pages_xricnt, sgl, els_xri_cnt); 10992 /* Perform endian conversion if necessary */ 10993 sgl->word0 = cpu_to_le32(sgl->word0); 10994 10995 if (!phba->sli4_hba.intr_enable) 10996 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 10997 else { 10998 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); 10999 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 11000 } 11001 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; 11002 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 11003 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 11004 if (rc != MBX_TIMEOUT) 11005 lpfc_sli4_mbox_cmd_free(phba, mbox); 11006 if (shdr_status || shdr_add_status || rc) { 11007 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11008 "2513 POST_SGL_BLOCK mailbox command failed " 11009 "status x%x add_status x%x mbx status x%x\n", 11010 shdr_status, shdr_add_status, rc); 11011 rc = -ENXIO; 11012 } 11013 return rc; 11014 } 11015 11016 /** 11017 * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware 11018 * @phba: pointer to lpfc hba data structure. 11019 * @sblist: pointer to scsi buffer list. 11020 * @count: number of scsi buffers on the list. 11021 * 11022 * This routine is invoked to post a block of @count scsi sgl pages from a 11023 * SCSI buffer list @sblist to the HBA using non-embedded mailbox command. 11024 * No Lock is held. 11025 * 11026 **/ 11027 int 11028 lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist, 11029 int cnt) 11030 { 11031 struct lpfc_scsi_buf *psb; 11032 struct lpfc_mbx_post_uembed_sgl_page1 *sgl; 11033 struct sgl_page_pairs *sgl_pg_pairs; 11034 void *viraddr; 11035 LPFC_MBOXQ_t *mbox; 11036 uint32_t reqlen, alloclen, pg_pairs; 11037 uint32_t mbox_tmo; 11038 uint16_t xritag_start = 0; 11039 int rc = 0; 11040 uint32_t shdr_status, shdr_add_status; 11041 dma_addr_t pdma_phys_bpl1; 11042 union lpfc_sli4_cfg_shdr *shdr; 11043 11044 /* Calculate the requested length of the dma memory */ 11045 reqlen = cnt * sizeof(struct sgl_page_pairs) + 11046 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); 11047 if (reqlen > SLI4_PAGE_SIZE) { 11048 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 11049 "0217 Block sgl registration required DMA " 11050 "size (%d) great than a page\n", reqlen); 11051 return -ENOMEM; 11052 } 11053 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 11054 if (!mbox) { 11055 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11056 "0283 Failed to allocate mbox cmd memory\n"); 11057 return -ENOMEM; 11058 } 11059 11060 /* Allocate DMA memory and set up the non-embedded mailbox command */ 11061 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 11062 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen, 11063 LPFC_SLI4_MBX_NEMBED); 11064 11065 if (alloclen < reqlen) { 11066 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11067 "2561 Allocated DMA memory size (%d) is " 11068 "less than the requested DMA memory " 11069 "size (%d)\n", alloclen, reqlen); 11070 lpfc_sli4_mbox_cmd_free(phba, mbox); 11071 return -ENOMEM; 11072 } 11073 /* Get the first SGE entry from the non-embedded DMA memory */ 11074 viraddr = mbox->sge_array->addr[0]; 11075 11076 /* Set up the SGL pages in the non-embedded DMA 
pages */ 11077 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; 11078 sgl_pg_pairs = &sgl->sgl_pg_pairs; 11079 11080 pg_pairs = 0; 11081 list_for_each_entry(psb, sblist, list) { 11082 /* Set up the sge entry */ 11083 sgl_pg_pairs->sgl_pg0_addr_lo = 11084 cpu_to_le32(putPaddrLow(psb->dma_phys_bpl)); 11085 sgl_pg_pairs->sgl_pg0_addr_hi = 11086 cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl)); 11087 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) 11088 pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE; 11089 else 11090 pdma_phys_bpl1 = 0; 11091 sgl_pg_pairs->sgl_pg1_addr_lo = 11092 cpu_to_le32(putPaddrLow(pdma_phys_bpl1)); 11093 sgl_pg_pairs->sgl_pg1_addr_hi = 11094 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1)); 11095 /* Keep the first xritag on the list */ 11096 if (pg_pairs == 0) 11097 xritag_start = psb->cur_iocbq.sli4_xritag; 11098 sgl_pg_pairs++; 11099 pg_pairs++; 11100 } 11101 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); 11102 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs); 11103 /* Perform endian conversion if necessary */ 11104 sgl->word0 = cpu_to_le32(sgl->word0); 11105 11106 if (!phba->sli4_hba.intr_enable) 11107 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 11108 else { 11109 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); 11110 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 11111 } 11112 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; 11113 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 11114 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 11115 if (rc != MBX_TIMEOUT) 11116 lpfc_sli4_mbox_cmd_free(phba, mbox); 11117 if (shdr_status || shdr_add_status || rc) { 11118 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11119 "2564 POST_SGL_BLOCK mailbox command failed " 11120 "status x%x add_status x%x mbx status x%x\n", 11121 shdr_status, shdr_add_status, rc); 11122 rc = -ENXIO; 11123 } 11124 return rc; 11125 } 11126 11127 /** 11128 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle 11129 * @phba: pointer to lpfc_hba struct that the frame was received on 11130 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 11131 * 11132 * This function checks the fields in the @fc_hdr to see if the FC frame is a 11133 * valid type of frame that the LPFC driver will handle. This function will 11134 * return a zero if the frame is a valid frame or a non zero value when the 11135 * frame does not pass the check. 
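 *
 * Example (mirrors the usage in lpfc_sli4_handle_received_buffer() later in
 * this file; the dmabuf variable is assumed to describe the received frame):
 *
 *	fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
 *	if (lpfc_fc_frame_check(phba, fc_hdr)) {
 *		lpfc_in_buf_free(phba, &dmabuf->dbuf);
 *		return;
 *	}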
11136 **/ 11137 static int 11138 lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) 11139 { 11140 char *rctl_names[] = FC_RCTL_NAMES_INIT; 11141 char *type_names[] = FC_TYPE_NAMES_INIT; 11142 struct fc_vft_header *fc_vft_hdr; 11143 11144 switch (fc_hdr->fh_r_ctl) { 11145 case FC_RCTL_DD_UNCAT: /* uncategorized information */ 11146 case FC_RCTL_DD_SOL_DATA: /* solicited data */ 11147 case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */ 11148 case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */ 11149 case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */ 11150 case FC_RCTL_DD_DATA_DESC: /* data descriptor */ 11151 case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */ 11152 case FC_RCTL_DD_CMD_STATUS: /* command status */ 11153 case FC_RCTL_ELS_REQ: /* extended link services request */ 11154 case FC_RCTL_ELS_REP: /* extended link services reply */ 11155 case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */ 11156 case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */ 11157 case FC_RCTL_BA_NOP: /* basic link service NOP */ 11158 case FC_RCTL_BA_ABTS: /* basic link service abort */ 11159 case FC_RCTL_BA_RMC: /* remove connection */ 11160 case FC_RCTL_BA_ACC: /* basic accept */ 11161 case FC_RCTL_BA_RJT: /* basic reject */ 11162 case FC_RCTL_BA_PRMT: 11163 case FC_RCTL_ACK_1: /* acknowledge_1 */ 11164 case FC_RCTL_ACK_0: /* acknowledge_0 */ 11165 case FC_RCTL_P_RJT: /* port reject */ 11166 case FC_RCTL_F_RJT: /* fabric reject */ 11167 case FC_RCTL_P_BSY: /* port busy */ 11168 case FC_RCTL_F_BSY: /* fabric busy to data frame */ 11169 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */ 11170 case FC_RCTL_LCR: /* link credit reset */ 11171 case FC_RCTL_END: /* end */ 11172 break; 11173 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */ 11174 fc_vft_hdr = (struct fc_vft_header *)fc_hdr; 11175 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1]; 11176 return lpfc_fc_frame_check(phba, fc_hdr); 11177 default: 11178 goto drop; 11179 } 11180 switch (fc_hdr->fh_type) { 11181 case FC_TYPE_BLS: 11182 case FC_TYPE_ELS: 11183 case FC_TYPE_FCP: 11184 case FC_TYPE_CT: 11185 break; 11186 case FC_TYPE_IP: 11187 case FC_TYPE_ILS: 11188 default: 11189 goto drop; 11190 } 11191 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 11192 "2538 Received frame rctl:%s type:%s\n", 11193 rctl_names[fc_hdr->fh_r_ctl], 11194 type_names[fc_hdr->fh_type]); 11195 return 0; 11196 drop: 11197 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, 11198 "2539 Dropped frame rctl:%s type:%s\n", 11199 rctl_names[fc_hdr->fh_r_ctl], 11200 type_names[fc_hdr->fh_type]); 11201 return 1; 11202 } 11203 11204 /** 11205 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame 11206 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 11207 * 11208 * This function processes the FC header to retrieve the VFI from the VF 11209 * header, if one exists. This function will return the VFI if one exists 11210 * or 0 if no VSAN Header exists. 
11211 **/ 11212 static uint32_t 11213 lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr) 11214 { 11215 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr; 11216 11217 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH) 11218 return 0; 11219 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr); 11220 } 11221 11222 /** 11223 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to 11224 * @phba: Pointer to the HBA structure to search for the vport on 11225 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 11226 * @fcfi: The FC Fabric ID that the frame came from 11227 * 11228 * This function searches the @phba for a vport that matches the content of the 11229 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the 11230 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function 11231 * returns the matching vport pointer or NULL if unable to match frame to a 11232 * vport. 11233 **/ 11234 static struct lpfc_vport * 11235 lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr, 11236 uint16_t fcfi) 11237 { 11238 struct lpfc_vport **vports; 11239 struct lpfc_vport *vport = NULL; 11240 int i; 11241 uint32_t did = (fc_hdr->fh_d_id[0] << 16 | 11242 fc_hdr->fh_d_id[1] << 8 | 11243 fc_hdr->fh_d_id[2]); 11244 11245 vports = lpfc_create_vport_work_array(phba); 11246 if (vports != NULL) 11247 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 11248 if (phba->fcf.fcfi == fcfi && 11249 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) && 11250 vports[i]->fc_myDID == did) { 11251 vport = vports[i]; 11252 break; 11253 } 11254 } 11255 lpfc_destroy_vport_work_array(phba, vports); 11256 return vport; 11257 } 11258 11259 /** 11260 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp 11261 * @vport: The vport to work on. 11262 * 11263 * This function updates the receive sequence time stamp for this vport. The 11264 * receive sequence time stamp indicates the time that the last frame of the 11265 * the sequence that has been idle for the longest amount of time was received. 11266 * the driver uses this time stamp to indicate if any received sequences have 11267 * timed out. 11268 **/ 11269 void 11270 lpfc_update_rcv_time_stamp(struct lpfc_vport *vport) 11271 { 11272 struct lpfc_dmabuf *h_buf; 11273 struct hbq_dmabuf *dmabuf = NULL; 11274 11275 /* get the oldest sequence on the rcv list */ 11276 h_buf = list_get_first(&vport->rcv_buffer_list, 11277 struct lpfc_dmabuf, list); 11278 if (!h_buf) 11279 return; 11280 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 11281 vport->rcv_buffer_time_stamp = dmabuf->time_stamp; 11282 } 11283 11284 /** 11285 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences. 11286 * @vport: The vport that the received sequences were sent to. 11287 * 11288 * This function cleans up all outstanding received sequences. This is called 11289 * by the driver when a link event or user action invalidates all the received 11290 * sequences. 
11291 **/ 11292 void 11293 lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport) 11294 { 11295 struct lpfc_dmabuf *h_buf, *hnext; 11296 struct lpfc_dmabuf *d_buf, *dnext; 11297 struct hbq_dmabuf *dmabuf = NULL; 11298 11299 /* start with the oldest sequence on the rcv list */ 11300 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) { 11301 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 11302 list_del_init(&dmabuf->hbuf.list); 11303 list_for_each_entry_safe(d_buf, dnext, 11304 &dmabuf->dbuf.list, list) { 11305 list_del_init(&d_buf->list); 11306 lpfc_in_buf_free(vport->phba, d_buf); 11307 } 11308 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf); 11309 } 11310 } 11311 11312 /** 11313 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences. 11314 * @vport: The vport that the received sequences were sent to. 11315 * 11316 * This function determines whether any received sequences have timed out by 11317 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp 11318 * indicates that there is at least one timed out sequence this routine will 11319 * go through the received sequences one at a time from most inactive to most 11320 * active to determine which ones need to be cleaned up. Once it has determined 11321 * that a sequence needs to be cleaned up it will simply free up the resources 11322 * without sending an abort. 11323 **/ 11324 void 11325 lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport) 11326 { 11327 struct lpfc_dmabuf *h_buf, *hnext; 11328 struct lpfc_dmabuf *d_buf, *dnext; 11329 struct hbq_dmabuf *dmabuf = NULL; 11330 unsigned long timeout; 11331 int abort_count = 0; 11332 11333 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) + 11334 vport->rcv_buffer_time_stamp); 11335 if (list_empty(&vport->rcv_buffer_list) || 11336 time_before(jiffies, timeout)) 11337 return; 11338 /* start with the oldest sequence on the rcv list */ 11339 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) { 11340 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 11341 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) + 11342 dmabuf->time_stamp); 11343 if (time_before(jiffies, timeout)) 11344 break; 11345 abort_count++; 11346 list_del_init(&dmabuf->hbuf.list); 11347 list_for_each_entry_safe(d_buf, dnext, 11348 &dmabuf->dbuf.list, list) { 11349 list_del_init(&d_buf->list); 11350 lpfc_in_buf_free(vport->phba, d_buf); 11351 } 11352 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf); 11353 } 11354 if (abort_count) 11355 lpfc_update_rcv_time_stamp(vport); 11356 } 11357 11358 /** 11359 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences 11360 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame 11361 * 11362 * This function searches through the existing incomplete sequences that have 11363 * been sent to this @vport. If the frame matches one of the incomplete 11364 * sequences then the dbuf in the @dmabuf is added to the list of frames that 11365 * make up that sequence. If no sequence is found that matches this frame then 11366 * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list 11367 * This function returns a pointer to the first dmabuf in the sequence list that 11368 * the frame was linked to. 
11369 **/ 11370 static struct hbq_dmabuf * 11371 lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf) 11372 { 11373 struct fc_frame_header *new_hdr; 11374 struct fc_frame_header *temp_hdr; 11375 struct lpfc_dmabuf *d_buf; 11376 struct lpfc_dmabuf *h_buf; 11377 struct hbq_dmabuf *seq_dmabuf = NULL; 11378 struct hbq_dmabuf *temp_dmabuf = NULL; 11379 11380 INIT_LIST_HEAD(&dmabuf->dbuf.list); 11381 dmabuf->time_stamp = jiffies; 11382 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 11383 /* Use the hdr_buf to find the sequence that this frame belongs to */ 11384 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) { 11385 temp_hdr = (struct fc_frame_header *)h_buf->virt; 11386 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) || 11387 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) || 11388 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3))) 11389 continue; 11390 /* found a pending sequence that matches this frame */ 11391 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 11392 break; 11393 } 11394 if (!seq_dmabuf) { 11395 /* 11396 * This indicates first frame received for this sequence. 11397 * Queue the buffer on the vport's rcv_buffer_list. 11398 */ 11399 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list); 11400 lpfc_update_rcv_time_stamp(vport); 11401 return dmabuf; 11402 } 11403 temp_hdr = seq_dmabuf->hbuf.virt; 11404 if (be16_to_cpu(new_hdr->fh_seq_cnt) < 11405 be16_to_cpu(temp_hdr->fh_seq_cnt)) { 11406 list_del_init(&seq_dmabuf->hbuf.list); 11407 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list); 11408 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list); 11409 lpfc_update_rcv_time_stamp(vport); 11410 return dmabuf; 11411 } 11412 /* move this sequence to the tail to indicate a young sequence */ 11413 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list); 11414 seq_dmabuf->time_stamp = jiffies; 11415 lpfc_update_rcv_time_stamp(vport); 11416 if (list_empty(&seq_dmabuf->dbuf.list)) { 11417 temp_hdr = dmabuf->hbuf.virt; 11418 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list); 11419 return seq_dmabuf; 11420 } 11421 /* find the correct place in the sequence to insert this frame */ 11422 list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) { 11423 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); 11424 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt; 11425 /* 11426 * If the frame's sequence count is greater than the frame on 11427 * the list then insert the frame right after this frame 11428 */ 11429 if (be16_to_cpu(new_hdr->fh_seq_cnt) > 11430 be16_to_cpu(temp_hdr->fh_seq_cnt)) { 11431 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list); 11432 return seq_dmabuf; 11433 } 11434 } 11435 return NULL; 11436 } 11437 11438 /** 11439 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence 11440 * @vport: pointer to a vitural port 11441 * @dmabuf: pointer to a dmabuf that describes the FC sequence 11442 * 11443 * This function tries to abort from the partially assembed sequence, described 11444 * by the information from basic abbort @dmabuf. It checks to see whether such 11445 * partially assembled sequence held by the driver. If so, it shall free up all 11446 * the frames from the partially assembled sequence. 
11447 * 11448 * Return 11449 * true -- if there is matching partially assembled sequence present and all 11450 * the frames freed with the sequence; 11451 * false -- if there is no matching partially assembled sequence present so 11452 * nothing got aborted in the lower layer driver 11453 **/ 11454 static bool 11455 lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport, 11456 struct hbq_dmabuf *dmabuf) 11457 { 11458 struct fc_frame_header *new_hdr; 11459 struct fc_frame_header *temp_hdr; 11460 struct lpfc_dmabuf *d_buf, *n_buf, *h_buf; 11461 struct hbq_dmabuf *seq_dmabuf = NULL; 11462 11463 /* Use the hdr_buf to find the sequence that matches this frame */ 11464 INIT_LIST_HEAD(&dmabuf->dbuf.list); 11465 INIT_LIST_HEAD(&dmabuf->hbuf.list); 11466 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 11467 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) { 11468 temp_hdr = (struct fc_frame_header *)h_buf->virt; 11469 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) || 11470 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) || 11471 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3))) 11472 continue; 11473 /* found a pending sequence that matches this frame */ 11474 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 11475 break; 11476 } 11477 11478 /* Free up all the frames from the partially assembled sequence */ 11479 if (seq_dmabuf) { 11480 list_for_each_entry_safe(d_buf, n_buf, 11481 &seq_dmabuf->dbuf.list, list) { 11482 list_del_init(&d_buf->list); 11483 lpfc_in_buf_free(vport->phba, d_buf); 11484 } 11485 return true; 11486 } 11487 return false; 11488 } 11489 11490 /** 11491 * lpfc_sli4_seq_abort_acc_cmpl - Accept seq abort iocb complete handler 11492 * @phba: Pointer to HBA context object. 11493 * @cmd_iocbq: pointer to the command iocbq structure. 11494 * @rsp_iocbq: pointer to the response iocbq structure. 11495 * 11496 * This function handles the sequence abort accept iocb command complete 11497 * event. It properly releases the memory allocated to the sequence abort 11498 * accept iocb. 11499 **/ 11500 static void 11501 lpfc_sli4_seq_abort_acc_cmpl(struct lpfc_hba *phba, 11502 struct lpfc_iocbq *cmd_iocbq, 11503 struct lpfc_iocbq *rsp_iocbq) 11504 { 11505 if (cmd_iocbq) 11506 lpfc_sli_release_iocbq(phba, cmd_iocbq); 11507 } 11508 11509 /** 11510 * lpfc_sli4_seq_abort_acc - Accept sequence abort 11511 * @phba: Pointer to HBA context object. 11512 * @fc_hdr: pointer to a FC frame header. 11513 * 11514 * This function sends a basic accept to a previous unsol sequence abort 11515 * event after aborting the sequence handling. 
11516 **/ 11517 static void 11518 lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba, 11519 struct fc_frame_header *fc_hdr) 11520 { 11521 struct lpfc_iocbq *ctiocb = NULL; 11522 struct lpfc_nodelist *ndlp; 11523 uint16_t oxid, rxid; 11524 uint32_t sid, fctl; 11525 IOCB_t *icmd; 11526 11527 if (!lpfc_is_link_up(phba)) 11528 return; 11529 11530 sid = sli4_sid_from_fc_hdr(fc_hdr); 11531 oxid = be16_to_cpu(fc_hdr->fh_ox_id); 11532 rxid = be16_to_cpu(fc_hdr->fh_rx_id); 11533 11534 ndlp = lpfc_findnode_did(phba->pport, sid); 11535 if (!ndlp) { 11536 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, 11537 "1268 Find ndlp returned NULL for oxid:x%x " 11538 "SID:x%x\n", oxid, sid); 11539 return; 11540 } 11541 11542 /* Allocate buffer for acc iocb */ 11543 ctiocb = lpfc_sli_get_iocbq(phba); 11544 if (!ctiocb) 11545 return; 11546 11547 /* Extract the F_CTL field from FC_HDR */ 11548 fctl = sli4_fctl_from_fc_hdr(fc_hdr); 11549 11550 icmd = &ctiocb->iocb; 11551 icmd->un.xseq64.bdl.bdeSize = 0; 11552 icmd->un.xseq64.bdl.ulpIoTag32 = 0; 11553 icmd->un.xseq64.w5.hcsw.Dfctl = 0; 11554 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC; 11555 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS; 11556 11557 /* Fill in the rest of iocb fields */ 11558 icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX; 11559 icmd->ulpBdeCount = 0; 11560 icmd->ulpLe = 1; 11561 icmd->ulpClass = CLASS3; 11562 icmd->ulpContext = ndlp->nlp_rpi; 11563 11564 ctiocb->iocb_cmpl = NULL; 11565 ctiocb->vport = phba->pport; 11566 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_acc_cmpl; 11567 11568 if (fctl & FC_FC_EX_CTX) { 11569 /* ABTS sent by responder to CT exchange, construction 11570 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG 11571 * field and RX_ID from ABTS for RX_ID field. 11572 */ 11573 bf_set(lpfc_abts_orig, &icmd->un.bls_acc, LPFC_ABTS_UNSOL_RSP); 11574 bf_set(lpfc_abts_rxid, &icmd->un.bls_acc, rxid); 11575 ctiocb->sli4_xritag = oxid; 11576 } else { 11577 /* ABTS sent by initiator to CT exchange, construction 11578 * of BA_ACC will need to allocate a new XRI as for the 11579 * XRI_TAG and RX_ID fields. 11580 */ 11581 bf_set(lpfc_abts_orig, &icmd->un.bls_acc, LPFC_ABTS_UNSOL_INT); 11582 bf_set(lpfc_abts_rxid, &icmd->un.bls_acc, NO_XRI); 11583 ctiocb->sli4_xritag = NO_XRI; 11584 } 11585 bf_set(lpfc_abts_oxid, &icmd->un.bls_acc, oxid); 11586 11587 /* Xmit CT abts accept on exchange <xid> */ 11588 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 11589 "1200 Xmit CT ABTS ACC on exchange x%x Data: x%x\n", 11590 CMD_XMIT_BLS_RSP64_CX, phba->link_state); 11591 lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0); 11592 } 11593 11594 /** 11595 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event 11596 * @vport: Pointer to the vport on which this sequence was received 11597 * @dmabuf: pointer to a dmabuf that describes the FC sequence 11598 * 11599 * This function handles an SLI-4 unsolicited abort event. If the unsolicited 11600 * receive sequence is only partially assembed by the driver, it shall abort 11601 * the partially assembled frames for the sequence. Otherwise, if the 11602 * unsolicited receive sequence has been completely assembled and passed to 11603 * the Upper Layer Protocol (UPL), it then mark the per oxid status for the 11604 * unsolicited sequence has been aborted. After that, it will issue a basic 11605 * accept to accept the abort. 
11606 **/ 11607 void 11608 lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport, 11609 struct hbq_dmabuf *dmabuf) 11610 { 11611 struct lpfc_hba *phba = vport->phba; 11612 struct fc_frame_header fc_hdr; 11613 uint32_t fctl; 11614 bool abts_par; 11615 11616 /* Make a copy of fc_hdr before the dmabuf being released */ 11617 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header)); 11618 fctl = sli4_fctl_from_fc_hdr(&fc_hdr); 11619 11620 if (fctl & FC_FC_EX_CTX) { 11621 /* 11622 * ABTS sent by responder to exchange, just free the buffer 11623 */ 11624 lpfc_in_buf_free(phba, &dmabuf->dbuf); 11625 } else { 11626 /* 11627 * ABTS sent by initiator to exchange, need to do cleanup 11628 */ 11629 /* Try to abort partially assembled seq */ 11630 abts_par = lpfc_sli4_abort_partial_seq(vport, dmabuf); 11631 11632 /* Send abort to ULP if partially seq abort failed */ 11633 if (abts_par == false) 11634 lpfc_sli4_send_seq_to_ulp(vport, dmabuf); 11635 else 11636 lpfc_in_buf_free(phba, &dmabuf->dbuf); 11637 } 11638 /* Send basic accept (BA_ACC) to the abort requester */ 11639 lpfc_sli4_seq_abort_acc(phba, &fc_hdr); 11640 } 11641 11642 /** 11643 * lpfc_seq_complete - Indicates if a sequence is complete 11644 * @dmabuf: pointer to a dmabuf that describes the FC sequence 11645 * 11646 * This function checks the sequence, starting with the frame described by 11647 * @dmabuf, to see if all the frames associated with this sequence are present. 11648 * the frames associated with this sequence are linked to the @dmabuf using the 11649 * dbuf list. This function looks for two major things. 1) That the first frame 11650 * has a sequence count of zero. 2) There is a frame with last frame of sequence 11651 * set. 3) That there are no holes in the sequence count. The function will 11652 * return 1 when the sequence is complete, otherwise it will return 0. 11653 **/ 11654 static int 11655 lpfc_seq_complete(struct hbq_dmabuf *dmabuf) 11656 { 11657 struct fc_frame_header *hdr; 11658 struct lpfc_dmabuf *d_buf; 11659 struct hbq_dmabuf *seq_dmabuf; 11660 uint32_t fctl; 11661 int seq_count = 0; 11662 11663 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 11664 /* make sure first fame of sequence has a sequence count of zero */ 11665 if (hdr->fh_seq_cnt != seq_count) 11666 return 0; 11667 fctl = (hdr->fh_f_ctl[0] << 16 | 11668 hdr->fh_f_ctl[1] << 8 | 11669 hdr->fh_f_ctl[2]); 11670 /* If last frame of sequence we can return success. */ 11671 if (fctl & FC_FC_END_SEQ) 11672 return 1; 11673 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) { 11674 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); 11675 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 11676 /* If there is a hole in the sequence count then fail. */ 11677 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt)) 11678 return 0; 11679 fctl = (hdr->fh_f_ctl[0] << 16 | 11680 hdr->fh_f_ctl[1] << 8 | 11681 hdr->fh_f_ctl[2]); 11682 /* If last frame of sequence we can return success. */ 11683 if (fctl & FC_FC_END_SEQ) 11684 return 1; 11685 } 11686 return 0; 11687 } 11688 11689 /** 11690 * lpfc_prep_seq - Prep sequence for ULP processing 11691 * @vport: Pointer to the vport on which this sequence was received 11692 * @dmabuf: pointer to a dmabuf that describes the FC sequence 11693 * 11694 * This function takes a sequence, described by a list of frames, and creates 11695 * a list of iocbq structures to describe the sequence. This iocbq list will be 11696 * used to issue to the generic unsolicited sequence handler. 
This routine 11697 * returns a pointer to the first iocbq in the list. If the function is unable 11698 * to allocate an iocbq then it throw out the received frames that were not 11699 * able to be described and return a pointer to the first iocbq. If unable to 11700 * allocate any iocbqs (including the first) this function will return NULL. 11701 **/ 11702 static struct lpfc_iocbq * 11703 lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) 11704 { 11705 struct lpfc_dmabuf *d_buf, *n_buf; 11706 struct lpfc_iocbq *first_iocbq, *iocbq; 11707 struct fc_frame_header *fc_hdr; 11708 uint32_t sid; 11709 struct ulp_bde64 *pbde; 11710 11711 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 11712 /* remove from receive buffer list */ 11713 list_del_init(&seq_dmabuf->hbuf.list); 11714 lpfc_update_rcv_time_stamp(vport); 11715 /* get the Remote Port's SID */ 11716 sid = sli4_sid_from_fc_hdr(fc_hdr); 11717 /* Get an iocbq struct to fill in. */ 11718 first_iocbq = lpfc_sli_get_iocbq(vport->phba); 11719 if (first_iocbq) { 11720 /* Initialize the first IOCB. */ 11721 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0; 11722 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS; 11723 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX; 11724 first_iocbq->iocb.ulpContext = be16_to_cpu(fc_hdr->fh_ox_id); 11725 first_iocbq->iocb.unsli3.rcvsli3.vpi = 11726 vport->vpi + vport->phba->vpi_base; 11727 /* put the first buffer into the first IOCBq */ 11728 first_iocbq->context2 = &seq_dmabuf->dbuf; 11729 first_iocbq->context3 = NULL; 11730 first_iocbq->iocb.ulpBdeCount = 1; 11731 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = 11732 LPFC_DATA_BUF_SIZE; 11733 first_iocbq->iocb.un.rcvels.remoteID = sid; 11734 first_iocbq->iocb.unsli3.rcvsli3.acc_len += 11735 bf_get(lpfc_rcqe_length, 11736 &seq_dmabuf->cq_event.cqe.rcqe_cmpl); 11737 } 11738 iocbq = first_iocbq; 11739 /* 11740 * Each IOCBq can have two Buffers assigned, so go through the list 11741 * of buffers for this sequence and save two buffers in each IOCBq 11742 */ 11743 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) { 11744 if (!iocbq) { 11745 lpfc_in_buf_free(vport->phba, d_buf); 11746 continue; 11747 } 11748 if (!iocbq->context3) { 11749 iocbq->context3 = d_buf; 11750 iocbq->iocb.ulpBdeCount++; 11751 pbde = (struct ulp_bde64 *) 11752 &iocbq->iocb.unsli3.sli3Words[4]; 11753 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE; 11754 first_iocbq->iocb.unsli3.rcvsli3.acc_len += 11755 bf_get(lpfc_rcqe_length, 11756 &seq_dmabuf->cq_event.cqe.rcqe_cmpl); 11757 } else { 11758 iocbq = lpfc_sli_get_iocbq(vport->phba); 11759 if (!iocbq) { 11760 if (first_iocbq) { 11761 first_iocbq->iocb.ulpStatus = 11762 IOSTAT_FCP_RSP_ERROR; 11763 first_iocbq->iocb.un.ulpWord[4] = 11764 IOERR_NO_RESOURCES; 11765 } 11766 lpfc_in_buf_free(vport->phba, d_buf); 11767 continue; 11768 } 11769 iocbq->context2 = d_buf; 11770 iocbq->context3 = NULL; 11771 iocbq->iocb.ulpBdeCount = 1; 11772 iocbq->iocb.un.cont64[0].tus.f.bdeSize = 11773 LPFC_DATA_BUF_SIZE; 11774 first_iocbq->iocb.unsli3.rcvsli3.acc_len += 11775 bf_get(lpfc_rcqe_length, 11776 &seq_dmabuf->cq_event.cqe.rcqe_cmpl); 11777 iocbq->iocb.un.rcvels.remoteID = sid; 11778 list_add_tail(&iocbq->list, &first_iocbq->list); 11779 } 11780 } 11781 return first_iocbq; 11782 } 11783 11784 static void 11785 lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport, 11786 struct hbq_dmabuf *seq_dmabuf) 11787 { 11788 struct fc_frame_header *fc_hdr; 11789 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb; 11790 struct lpfc_hba *phba = 
vport->phba; 11791 11792 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 11793 iocbq = lpfc_prep_seq(vport, seq_dmabuf); 11794 if (!iocbq) { 11795 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11796 "2707 Ring %d handler: Failed to allocate " 11797 "iocb Rctl x%x Type x%x received\n", 11798 LPFC_ELS_RING, 11799 fc_hdr->fh_r_ctl, fc_hdr->fh_type); 11800 return; 11801 } 11802 if (!lpfc_complete_unsol_iocb(phba, 11803 &phba->sli.ring[LPFC_ELS_RING], 11804 iocbq, fc_hdr->fh_r_ctl, 11805 fc_hdr->fh_type)) 11806 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 11807 "2540 Ring %d handler: unexpected Rctl " 11808 "x%x Type x%x received\n", 11809 LPFC_ELS_RING, 11810 fc_hdr->fh_r_ctl, fc_hdr->fh_type); 11811 11812 /* Free iocb created in lpfc_prep_seq */ 11813 list_for_each_entry_safe(curr_iocb, next_iocb, 11814 &iocbq->list, list) { 11815 list_del_init(&curr_iocb->list); 11816 lpfc_sli_release_iocbq(phba, curr_iocb); 11817 } 11818 lpfc_sli_release_iocbq(phba, iocbq); 11819 } 11820 11821 /** 11822 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware 11823 * @phba: Pointer to HBA context object. 11824 * 11825 * This function is called with no lock held. This function processes all 11826 * the received buffers and gives it to upper layers when a received buffer 11827 * indicates that it is the final frame in the sequence. The interrupt 11828 * service routine processes received buffers at interrupt contexts and adds 11829 * received dma buffers to the rb_pend_list queue and signals the worker thread. 11830 * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the 11831 * appropriate receive function when the final frame in a sequence is received. 11832 **/ 11833 void 11834 lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba, 11835 struct hbq_dmabuf *dmabuf) 11836 { 11837 struct hbq_dmabuf *seq_dmabuf; 11838 struct fc_frame_header *fc_hdr; 11839 struct lpfc_vport *vport; 11840 uint32_t fcfi; 11841 11842 /* Process each received buffer */ 11843 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 11844 /* check to see if this a valid type of frame */ 11845 if (lpfc_fc_frame_check(phba, fc_hdr)) { 11846 lpfc_in_buf_free(phba, &dmabuf->dbuf); 11847 return; 11848 } 11849 fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->cq_event.cqe.rcqe_cmpl); 11850 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi); 11851 if (!vport || !(vport->vpi_state & LPFC_VPI_REGISTERED)) { 11852 /* throw out the frame */ 11853 lpfc_in_buf_free(phba, &dmabuf->dbuf); 11854 return; 11855 } 11856 /* Handle the basic abort sequence (BA_ABTS) event */ 11857 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) { 11858 lpfc_sli4_handle_unsol_abort(vport, dmabuf); 11859 return; 11860 } 11861 11862 /* Link this frame */ 11863 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf); 11864 if (!seq_dmabuf) { 11865 /* unable to add frame to vport - throw it out */ 11866 lpfc_in_buf_free(phba, &dmabuf->dbuf); 11867 return; 11868 } 11869 /* If not last frame in sequence continue processing frames. */ 11870 if (!lpfc_seq_complete(seq_dmabuf)) 11871 return; 11872 11873 /* Send the complete sequence to the upper layer protocol */ 11874 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf); 11875 } 11876 11877 /** 11878 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port 11879 * @phba: pointer to lpfc hba data structure. 11880 * 11881 * This routine is invoked to post rpi header templates to the 11882 * HBA consistent with the SLI-4 interface spec. 
This routine 11883 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to 11884 * SLI4_PAGE_SIZE modulo 64 rpi context headers. 11885 * 11886 * This routine does not require any locks. It's usage is expected 11887 * to be driver load or reset recovery when the driver is 11888 * sequential. 11889 * 11890 * Return codes 11891 * 0 - successful 11892 * -EIO - The mailbox failed to complete successfully. 11893 * When this error occurs, the driver is not guaranteed 11894 * to have any rpi regions posted to the device and 11895 * must either attempt to repost the regions or take a 11896 * fatal error. 11897 **/ 11898 int 11899 lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba) 11900 { 11901 struct lpfc_rpi_hdr *rpi_page; 11902 uint32_t rc = 0; 11903 11904 /* Post all rpi memory regions to the port. */ 11905 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 11906 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page); 11907 if (rc != MBX_SUCCESS) { 11908 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11909 "2008 Error %d posting all rpi " 11910 "headers\n", rc); 11911 rc = -EIO; 11912 break; 11913 } 11914 } 11915 11916 return rc; 11917 } 11918 11919 /** 11920 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port 11921 * @phba: pointer to lpfc hba data structure. 11922 * @rpi_page: pointer to the rpi memory region. 11923 * 11924 * This routine is invoked to post a single rpi header to the 11925 * HBA consistent with the SLI-4 interface spec. This memory region 11926 * maps up to 64 rpi context regions. 11927 * 11928 * Return codes 11929 * 0 - successful 11930 * -ENOMEM - No available memory 11931 * -EIO - The mailbox failed to complete successfully. 11932 **/ 11933 int 11934 lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page) 11935 { 11936 LPFC_MBOXQ_t *mboxq; 11937 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl; 11938 uint32_t rc = 0; 11939 uint32_t mbox_tmo; 11940 uint32_t shdr_status, shdr_add_status; 11941 union lpfc_sli4_cfg_shdr *shdr; 11942 11943 /* The port is notified of the header region via a mailbox command. */ 11944 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 11945 if (!mboxq) { 11946 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11947 "2001 Unable to allocate memory for issuing " 11948 "SLI_CONFIG_SPECIAL mailbox command\n"); 11949 return -ENOMEM; 11950 } 11951 11952 /* Post all rpi memory regions to the port. 
	 */
	hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
	mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
			 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
			 sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
	       hdr_tmpl, rpi_page->page_count);
	bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
	       rpi_page->start_rpi);
	hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
	hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		mempool_free(mboxq, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2514 POST_RPI_HDR mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
}

/**
 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate an rpi from the driver's rpi
 * bitmask. The search starts at the port-reported rpi_base and is
 * bounded by the port's next_rpi value. When the allocation leaves
 * fewer than LPFC_RPI_LOW_WATER_MARK rpis available, another rpi
 * header page is created and posted to the port.
 *
 * Returns
 *	A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
 *	LPFC_RPI_ALLOC_ERROR if no rpis are available.
 **/
int
lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
{
	int rpi;
	uint16_t max_rpi, rpi_base, rpi_limit;
	uint16_t rpi_remaining;
	struct lpfc_rpi_hdr *rpi_hdr;

	max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
	rpi_base = phba->sli4_hba.max_cfg_param.rpi_base;
	rpi_limit = phba->sli4_hba.next_rpi;

	/*
	 * The valid rpi range is not guaranteed to be zero-based. Start
	 * the search at the rpi_base as reported by the port.
	 */
	spin_lock_irq(&phba->hbalock);
	rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, rpi_base);
	if (rpi >= rpi_limit || rpi < rpi_base)
		rpi = LPFC_RPI_ALLOC_ERROR;
	else {
		set_bit(rpi, phba->sli4_hba.rpi_bmask);
		phba->sli4_hba.max_cfg_param.rpi_used++;
		phba->sli4_hba.rpi_count++;
	}

	/*
	 * Don't try to allocate more rpi header regions if the device's
	 * limit on available rpis has been exhausted.
	 */
	if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
	    (phba->sli4_hba.rpi_count >= max_rpi)) {
		spin_unlock_irq(&phba->hbalock);
		return rpi;
	}

	/*
	 * If the driver is running low on rpi resources, allocate another
	 * page now. Note that the next_rpi value is used because it
	 * represents how many rpis are actually in use whereas max_rpi
	 * notes the maximum the device supports.
	 */
	rpi_remaining = phba->sli4_hba.next_rpi - rpi_base -
		phba->sli4_hba.rpi_count;
	spin_unlock_irq(&phba->hbalock);
	if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
		rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
		if (!rpi_hdr) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2002 Error Could not grow rpi "
					"count\n");
		} else {
			lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
		}
	}

	return rpi;
}

/**
 * __lpfc_sli4_free_rpi - Release an rpi for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @rpi: rpi to release.
 *
 * This routine is invoked to release an rpi to the pool of
 * available rpis maintained by the driver. The caller is expected
 * to hold the hbalock.
 **/
void
__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
{
	if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
		phba->sli4_hba.rpi_count--;
		phba->sli4_hba.max_cfg_param.rpi_used--;
	}
}

/**
 * lpfc_sli4_free_rpi - Release an rpi for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @rpi: rpi to release.
 *
 * This routine is invoked to release an rpi to the pool of
 * available rpis maintained by the driver. It takes the hbalock
 * before calling __lpfc_sli4_free_rpi.
 **/
void
lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
{
	spin_lock_irq(&phba->hbalock);
	__lpfc_sli4_free_rpi(phba, rpi);
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the memory region that provides
 * the driver's rpi bitmask.
 **/
void
lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
{
	kfree(phba->sli4_hba.rpi_bmask);
}

/**
 * lpfc_sli4_resume_rpi - Resume an rpi on the port
 * @ndlp: pointer to the node whose rpi is to be resumed.
 *
 * This routine is invoked to issue a resume rpi mailbox command so
 * that the port resumes traffic on the rpi associated with @ndlp.
 **/
int
lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_hba *phba = ndlp->phba;
	int rc;

	/* Allocate a mailbox for the resume rpi command. */
	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	/* Construct and issue the resume rpi mailbox command. */
	lpfc_resume_rpi(mboxq, ndlp);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2010 Resume RPI Mailbox failed "
				"status %d, mbxStatus x%x\n", rc,
				bf_get(lpfc_mqe_status, &mboxq->u.mqe));
		mempool_free(mboxq, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;
}

/**
 * lpfc_sli4_init_vpi - Initialize a vpi with the port
 * @phba: pointer to lpfc hba data structure.
 * @vpi: vpi value to activate with the port.
 *
 * This routine is invoked to activate a vpi with the
 * port when the host intends to use vports with a
 * nonzero vpi.
12138 * 12139 * Returns: 12140 * 0 success 12141 * -Evalue otherwise 12142 **/ 12143 int 12144 lpfc_sli4_init_vpi(struct lpfc_hba *phba, uint16_t vpi) 12145 { 12146 LPFC_MBOXQ_t *mboxq; 12147 int rc = 0; 12148 int retval = MBX_SUCCESS; 12149 uint32_t mbox_tmo; 12150 12151 if (vpi == 0) 12152 return -EINVAL; 12153 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12154 if (!mboxq) 12155 return -ENOMEM; 12156 lpfc_init_vpi(phba, mboxq, vpi); 12157 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_INIT_VPI); 12158 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 12159 if (rc != MBX_SUCCESS) { 12160 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12161 "2022 INIT VPI Mailbox failed " 12162 "status %d, mbxStatus x%x\n", rc, 12163 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 12164 retval = -EIO; 12165 } 12166 if (rc != MBX_TIMEOUT) 12167 mempool_free(mboxq, phba->mbox_mem_pool); 12168 12169 return retval; 12170 } 12171 12172 /** 12173 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler. 12174 * @phba: pointer to lpfc hba data structure. 12175 * @mboxq: Pointer to mailbox object. 12176 * 12177 * This routine is invoked to manually add a single FCF record. The caller 12178 * must pass a completely initialized FCF_Record. This routine takes 12179 * care of the nonembedded mailbox operations. 12180 **/ 12181 static void 12182 lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 12183 { 12184 void *virt_addr; 12185 union lpfc_sli4_cfg_shdr *shdr; 12186 uint32_t shdr_status, shdr_add_status; 12187 12188 virt_addr = mboxq->sge_array->addr[0]; 12189 /* The IOCTL status is embedded in the mailbox subheader. */ 12190 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr; 12191 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 12192 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 12193 12194 if ((shdr_status || shdr_add_status) && 12195 (shdr_status != STATUS_FCF_IN_USE)) 12196 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12197 "2558 ADD_FCF_RECORD mailbox failed with " 12198 "status x%x add_status x%x\n", 12199 shdr_status, shdr_add_status); 12200 12201 lpfc_sli4_mbox_cmd_free(phba, mboxq); 12202 } 12203 12204 /** 12205 * lpfc_sli4_add_fcf_record - Manually add an FCF Record. 12206 * @phba: pointer to lpfc hba data structure. 12207 * @fcf_record: pointer to the initialized fcf record to add. 12208 * 12209 * This routine is invoked to manually add a single FCF record. The caller 12210 * must pass a completely initialized FCF_Record. This routine takes 12211 * care of the nonembedded mailbox operations. 
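 *
 * A minimal usage sketch (hypothetical caller, stack record used purely
 * for illustration) that pairs this routine with the default record
 * builder defined further down in this file:
 *
 *	struct fcf_record fcf_record;
 *
 *	lpfc_sli4_build_dflt_fcf_record(phba, &fcf_record, fcf_index);
 *	rc = lpfc_sli4_add_fcf_record(phba, &fcf_record);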
12212 **/ 12213 int 12214 lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record) 12215 { 12216 int rc = 0; 12217 LPFC_MBOXQ_t *mboxq; 12218 uint8_t *bytep; 12219 void *virt_addr; 12220 dma_addr_t phys_addr; 12221 struct lpfc_mbx_sge sge; 12222 uint32_t alloc_len, req_len; 12223 uint32_t fcfindex; 12224 12225 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12226 if (!mboxq) { 12227 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12228 "2009 Failed to allocate mbox for ADD_FCF cmd\n"); 12229 return -ENOMEM; 12230 } 12231 12232 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) + 12233 sizeof(uint32_t); 12234 12235 /* Allocate DMA memory and set up the non-embedded mailbox command */ 12236 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, 12237 LPFC_MBOX_OPCODE_FCOE_ADD_FCF, 12238 req_len, LPFC_SLI4_MBX_NEMBED); 12239 if (alloc_len < req_len) { 12240 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12241 "2523 Allocated DMA memory size (x%x) is " 12242 "less than the requested DMA memory " 12243 "size (x%x)\n", alloc_len, req_len); 12244 lpfc_sli4_mbox_cmd_free(phba, mboxq); 12245 return -ENOMEM; 12246 } 12247 12248 /* 12249 * Get the first SGE entry from the non-embedded DMA memory. This 12250 * routine only uses a single SGE. 12251 */ 12252 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge); 12253 phys_addr = getPaddr(sge.pa_hi, sge.pa_lo); 12254 virt_addr = mboxq->sge_array->addr[0]; 12255 /* 12256 * Configure the FCF record for FCFI 0. This is the driver's 12257 * hardcoded default and gets used in nonFIP mode. 12258 */ 12259 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record); 12260 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr); 12261 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t)); 12262 12263 /* 12264 * Copy the fcf_index and the FCF Record Data. The data starts after 12265 * the FCoE header plus word10. The data copy needs to be endian 12266 * correct. 12267 */ 12268 bytep += sizeof(uint32_t); 12269 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record)); 12270 mboxq->vport = phba->pport; 12271 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record; 12272 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 12273 if (rc == MBX_NOT_FINISHED) { 12274 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12275 "2515 ADD_FCF_RECORD mailbox failed with " 12276 "status 0x%x\n", rc); 12277 lpfc_sli4_mbox_cmd_free(phba, mboxq); 12278 rc = -EIO; 12279 } else 12280 rc = 0; 12281 12282 return rc; 12283 } 12284 12285 /** 12286 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record. 12287 * @phba: pointer to lpfc hba data structure. 12288 * @fcf_record: pointer to the fcf record to write the default data. 12289 * @fcf_index: FCF table entry index. 12290 * 12291 * This routine is invoked to build the driver's default FCF record. The 12292 * values used are hardcoded. This routine handles memory initialization. 
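 *
 * For example (worked from the vlan_bitmap math in the body below), with
 * phba->valid_vlan set and phba->vlan_id == 10, byte 1 of the bitmap gets
 * bit 2 set, i.e. vlan_bitmap[1] == 0x04.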
 *
 **/
void
lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
				struct fcf_record *fcf_record,
				uint16_t fcf_index)
{
	memset(fcf_record, 0, sizeof(struct fcf_record));
	fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
	fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
	fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
	bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
	bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
	bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
	bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
	bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
	bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
	bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
	bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
	bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
	bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
	bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
	bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
	bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
	       LPFC_FCF_FPMA | LPFC_FCF_SPMA);
	/* Set the VLAN bit map */
	if (phba->valid_vlan) {
		fcf_record->vlan_bitmap[phba->vlan_id / 8]
			= 1 << (phba->vlan_id % 8);
	}
}

/**
 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: FCF table entry offset.
 *
 * This routine is invoked to scan the entire FCF table by reading FCF
 * records and processing them one at a time, starting from @fcf_index,
 * for initial FCF discovery or fast FCF failover rediscovery.
 *
 * Return 0 if the mailbox command is submitted successfully, nonzero
 * otherwise.
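 *
 * A typical initial scan (illustrative sketch; the constant below is the
 * one the body of this routine already checks for) starts from the first
 * table entry:
 *
 *	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);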
12336 **/ 12337 int 12338 lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) 12339 { 12340 int rc = 0, error; 12341 LPFC_MBOXQ_t *mboxq; 12342 12343 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag; 12344 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12345 if (!mboxq) { 12346 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12347 "2000 Failed to allocate mbox for " 12348 "READ_FCF cmd\n"); 12349 error = -ENOMEM; 12350 goto fail_fcf_scan; 12351 } 12352 /* Construct the read FCF record mailbox command */ 12353 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); 12354 if (rc) { 12355 error = -EINVAL; 12356 goto fail_fcf_scan; 12357 } 12358 /* Issue the mailbox command asynchronously */ 12359 mboxq->vport = phba->pport; 12360 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec; 12361 12362 spin_lock_irq(&phba->hbalock); 12363 phba->hba_flag |= FCF_TS_INPROG; 12364 spin_unlock_irq(&phba->hbalock); 12365 12366 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 12367 if (rc == MBX_NOT_FINISHED) 12368 error = -EIO; 12369 else { 12370 /* Reset eligible FCF count for new scan */ 12371 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST) 12372 phba->fcf.eligible_fcf_cnt = 0; 12373 error = 0; 12374 } 12375 fail_fcf_scan: 12376 if (error) { 12377 if (mboxq) 12378 lpfc_sli4_mbox_cmd_free(phba, mboxq); 12379 /* FCF scan failed, clear FCF_TS_INPROG flag */ 12380 spin_lock_irq(&phba->hbalock); 12381 phba->hba_flag &= ~FCF_TS_INPROG; 12382 spin_unlock_irq(&phba->hbalock); 12383 } 12384 return error; 12385 } 12386 12387 /** 12388 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf. 12389 * @phba: pointer to lpfc hba data structure. 12390 * @fcf_index: FCF table entry offset. 12391 * 12392 * This routine is invoked to read an FCF record indicated by @fcf_index 12393 * and to use it for FLOGI roundrobin FCF failover. 12394 * 12395 * Return 0 if the mailbox command is submitted sucessfully, none 0 12396 * otherwise. 12397 **/ 12398 int 12399 lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) 12400 { 12401 int rc = 0, error; 12402 LPFC_MBOXQ_t *mboxq; 12403 12404 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12405 if (!mboxq) { 12406 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT, 12407 "2763 Failed to allocate mbox for " 12408 "READ_FCF cmd\n"); 12409 error = -ENOMEM; 12410 goto fail_fcf_read; 12411 } 12412 /* Construct the read FCF record mailbox command */ 12413 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); 12414 if (rc) { 12415 error = -EINVAL; 12416 goto fail_fcf_read; 12417 } 12418 /* Issue the mailbox command asynchronously */ 12419 mboxq->vport = phba->pport; 12420 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec; 12421 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 12422 if (rc == MBX_NOT_FINISHED) 12423 error = -EIO; 12424 else 12425 error = 0; 12426 12427 fail_fcf_read: 12428 if (error && mboxq) 12429 lpfc_sli4_mbox_cmd_free(phba, mboxq); 12430 return error; 12431 } 12432 12433 /** 12434 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask. 12435 * @phba: pointer to lpfc hba data structure. 12436 * @fcf_index: FCF table entry offset. 12437 * 12438 * This routine is invoked to read an FCF record indicated by @fcf_index to 12439 * determine whether it's eligible for FLOGI roundrobin failover list. 12440 * 12441 * Return 0 if the mailbox command is submitted sucessfully, none 0 12442 * otherwise. 
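 *
 * Note: this routine, lpfc_sli4_fcf_scan_read_fcf_rec() and
 * lpfc_sli4_fcf_rr_read_fcf_rec() all build the same READ_FCF mailbox
 * via lpfc_sli4_mbx_read_fcf_rec(); they differ mainly in the completion
 * handler installed (lpfc_mbx_cmpl_read_fcf_rec here) and, for the scan
 * variant, in the FCF_TS_INPROG bookkeeping.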
12443 **/ 12444 int 12445 lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) 12446 { 12447 int rc = 0, error; 12448 LPFC_MBOXQ_t *mboxq; 12449 12450 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12451 if (!mboxq) { 12452 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT, 12453 "2758 Failed to allocate mbox for " 12454 "READ_FCF cmd\n"); 12455 error = -ENOMEM; 12456 goto fail_fcf_read; 12457 } 12458 /* Construct the read FCF record mailbox command */ 12459 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); 12460 if (rc) { 12461 error = -EINVAL; 12462 goto fail_fcf_read; 12463 } 12464 /* Issue the mailbox command asynchronously */ 12465 mboxq->vport = phba->pport; 12466 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec; 12467 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 12468 if (rc == MBX_NOT_FINISHED) 12469 error = -EIO; 12470 else 12471 error = 0; 12472 12473 fail_fcf_read: 12474 if (error && mboxq) 12475 lpfc_sli4_mbox_cmd_free(phba, mboxq); 12476 return error; 12477 } 12478 12479 /** 12480 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index 12481 * @phba: pointer to lpfc hba data structure. 12482 * 12483 * This routine is to get the next eligible FCF record index in a round 12484 * robin fashion. If the next eligible FCF record index equals to the 12485 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF) 12486 * shall be returned, otherwise, the next eligible FCF record's index 12487 * shall be returned. 12488 **/ 12489 uint16_t 12490 lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba) 12491 { 12492 uint16_t next_fcf_index; 12493 12494 /* Search start from next bit of currently registered FCF index */ 12495 next_fcf_index = (phba->fcf.current_rec.fcf_indx + 1) % 12496 LPFC_SLI4_FCF_TBL_INDX_MAX; 12497 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, 12498 LPFC_SLI4_FCF_TBL_INDX_MAX, 12499 next_fcf_index); 12500 12501 /* Wrap around condition on phba->fcf.fcf_rr_bmask */ 12502 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) 12503 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, 12504 LPFC_SLI4_FCF_TBL_INDX_MAX, 0); 12505 12506 /* Check roundrobin failover list empty condition */ 12507 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 12508 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 12509 "2844 No roundrobin failover FCF available\n"); 12510 return LPFC_FCOE_FCF_NEXT_NONE; 12511 } 12512 12513 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 12514 "2845 Get next roundrobin failover FCF (x%x)\n", 12515 next_fcf_index); 12516 12517 return next_fcf_index; 12518 } 12519 12520 /** 12521 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index 12522 * @phba: pointer to lpfc hba data structure. 12523 * 12524 * This routine sets the FCF record index in to the eligible bmask for 12525 * roundrobin failover search. It checks to make sure that the index 12526 * does not go beyond the range of the driver allocated bmask dimension 12527 * before setting the bit. 12528 * 12529 * Returns 0 if the index bit successfully set, otherwise, it returns 12530 * -EINVAL. 
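 *
 * Illustrative sketch (hypothetical failover step) of how the roundrobin
 * bmask routines in this file are meant to be combined:
 *
 *	next = lpfc_sli4_fcf_rr_next_index_get(phba);
 *	if (next != LPFC_FCOE_FCF_NEXT_NONE)
 *		lpfc_sli4_fcf_rr_read_fcf_rec(phba, next);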
12531 **/ 12532 int 12533 lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index) 12534 { 12535 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 12536 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 12537 "2610 FCF (x%x) reached driver's book " 12538 "keeping dimension:x%x\n", 12539 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); 12540 return -EINVAL; 12541 } 12542 /* Set the eligible FCF record index bmask */ 12543 set_bit(fcf_index, phba->fcf.fcf_rr_bmask); 12544 12545 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 12546 "2790 Set FCF (x%x) to roundrobin FCF failover " 12547 "bmask\n", fcf_index); 12548 12549 return 0; 12550 } 12551 12552 /** 12553 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index 12554 * @phba: pointer to lpfc hba data structure. 12555 * 12556 * This routine clears the FCF record index from the eligible bmask for 12557 * roundrobin failover search. It checks to make sure that the index 12558 * does not go beyond the range of the driver allocated bmask dimension 12559 * before clearing the bit. 12560 **/ 12561 void 12562 lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index) 12563 { 12564 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 12565 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 12566 "2762 FCF (x%x) reached driver's book " 12567 "keeping dimension:x%x\n", 12568 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); 12569 return; 12570 } 12571 /* Clear the eligible FCF record index bmask */ 12572 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask); 12573 12574 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 12575 "2791 Clear FCF (x%x) from roundrobin failover " 12576 "bmask\n", fcf_index); 12577 } 12578 12579 /** 12580 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table 12581 * @phba: pointer to lpfc hba data structure. 12582 * 12583 * This routine is the completion routine for the rediscover FCF table mailbox 12584 * command. If the mailbox command returned failure, it will try to stop the 12585 * FCF rediscover wait timer. 12586 **/ 12587 void 12588 lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) 12589 { 12590 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf; 12591 uint32_t shdr_status, shdr_add_status; 12592 12593 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl; 12594 12595 shdr_status = bf_get(lpfc_mbox_hdr_status, 12596 &redisc_fcf->header.cfg_shdr.response); 12597 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 12598 &redisc_fcf->header.cfg_shdr.response); 12599 if (shdr_status || shdr_add_status) { 12600 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 12601 "2746 Requesting for FCF rediscovery failed " 12602 "status x%x add_status x%x\n", 12603 shdr_status, shdr_add_status); 12604 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) { 12605 spin_lock_irq(&phba->hbalock); 12606 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC; 12607 spin_unlock_irq(&phba->hbalock); 12608 /* 12609 * CVL event triggered FCF rediscover request failed, 12610 * last resort to re-try current registered FCF entry. 12611 */ 12612 lpfc_retry_pport_discovery(phba); 12613 } else { 12614 spin_lock_irq(&phba->hbalock); 12615 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; 12616 spin_unlock_irq(&phba->hbalock); 12617 /* 12618 * DEAD FCF event triggered FCF rediscover request 12619 * failed, last resort to fail over as a link down 12620 * to FCF registration. 
12621 */ 12622 lpfc_sli4_fcf_dead_failthrough(phba); 12623 } 12624 } else { 12625 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 12626 "2775 Start FCF rediscover quiescent timer\n"); 12627 /* 12628 * Start FCF rediscovery wait timer for pending FCF 12629 * before rescan FCF record table. 12630 */ 12631 lpfc_fcf_redisc_wait_start_timer(phba); 12632 } 12633 12634 mempool_free(mbox, phba->mbox_mem_pool); 12635 } 12636 12637 /** 12638 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port. 12639 * @phba: pointer to lpfc hba data structure. 12640 * 12641 * This routine is invoked to request for rediscovery of the entire FCF table 12642 * by the port. 12643 **/ 12644 int 12645 lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba) 12646 { 12647 LPFC_MBOXQ_t *mbox; 12648 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf; 12649 int rc, length; 12650 12651 /* Cancel retry delay timers to all vports before FCF rediscover */ 12652 lpfc_cancel_all_vport_retry_delay_timer(phba); 12653 12654 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12655 if (!mbox) { 12656 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12657 "2745 Failed to allocate mbox for " 12658 "requesting FCF rediscover.\n"); 12659 return -ENOMEM; 12660 } 12661 12662 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) - 12663 sizeof(struct lpfc_sli4_cfg_mhdr)); 12664 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 12665 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF, 12666 length, LPFC_SLI4_MBX_EMBED); 12667 12668 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl; 12669 /* Set count to 0 for invalidating the entire FCF database */ 12670 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0); 12671 12672 /* Issue the mailbox command asynchronously */ 12673 mbox->vport = phba->pport; 12674 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table; 12675 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 12676 12677 if (rc == MBX_NOT_FINISHED) { 12678 mempool_free(mbox, phba->mbox_mem_pool); 12679 return -EIO; 12680 } 12681 return 0; 12682 } 12683 12684 /** 12685 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event 12686 * @phba: pointer to lpfc hba data structure. 12687 * 12688 * This function is the failover routine as a last resort to the FCF DEAD 12689 * event when driver failed to perform fast FCF failover. 12690 **/ 12691 void 12692 lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba) 12693 { 12694 uint32_t link_state; 12695 12696 /* 12697 * Last resort as FCF DEAD event failover will treat this as 12698 * a link down, but save the link state because we don't want 12699 * it to be changed to Link Down unless it is already down. 12700 */ 12701 link_state = phba->link_state; 12702 lpfc_linkdown(phba); 12703 phba->link_state = link_state; 12704 12705 /* Unregister FCF if no devices connected to it */ 12706 lpfc_unregister_unused_fcf(phba); 12707 } 12708 12709 /** 12710 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled. 12711 * @phba: pointer to lpfc hba data structure. 12712 * 12713 * This function read region 23 and parse TLV for port status to 12714 * decide if the user disaled the port. If the TLV indicates the 12715 * port is disabled, the hba_flag is set accordingly. 
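 *
 * The region 23 layout assumed by the parser below (reconstructed from
 * the parsing code itself, not from a separate specification) is roughly:
 *
 *	bytes 0-3 : LPFC_REGION23_SIGNATURE
 *	bytes 4-7 : version record, first byte == LPFC_REGION23_VERSION
 *	bytes 8.. : TLV records of the form [type][length in words][data],
 *	            terminated by LPFC_REGION23_LAST_REC. A driver specific
 *	            record (DRIVER_SPECIFIC_TYPE / LINUX_DRIVER_ID) may carry
 *	            a PORT_STE_TYPE sub-TLV; a zero state byte there means
 *	            the port is disabled and LINK_DISABLED is set.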
12716 **/ 12717 void 12718 lpfc_sli_read_link_ste(struct lpfc_hba *phba) 12719 { 12720 LPFC_MBOXQ_t *pmb = NULL; 12721 MAILBOX_t *mb; 12722 uint8_t *rgn23_data = NULL; 12723 uint32_t offset = 0, data_size, sub_tlv_len, tlv_offset; 12724 int rc; 12725 12726 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12727 if (!pmb) { 12728 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12729 "2600 lpfc_sli_read_serdes_param failed to" 12730 " allocate mailbox memory\n"); 12731 goto out; 12732 } 12733 mb = &pmb->u.mb; 12734 12735 /* Get adapter Region 23 data */ 12736 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL); 12737 if (!rgn23_data) 12738 goto out; 12739 12740 do { 12741 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23); 12742 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 12743 12744 if (rc != MBX_SUCCESS) { 12745 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 12746 "2601 lpfc_sli_read_link_ste failed to" 12747 " read config region 23 rc 0x%x Status 0x%x\n", 12748 rc, mb->mbxStatus); 12749 mb->un.varDmp.word_cnt = 0; 12750 } 12751 /* 12752 * dump mem may return a zero when finished or we got a 12753 * mailbox error, either way we are done. 12754 */ 12755 if (mb->un.varDmp.word_cnt == 0) 12756 break; 12757 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset) 12758 mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset; 12759 12760 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, 12761 rgn23_data + offset, 12762 mb->un.varDmp.word_cnt); 12763 offset += mb->un.varDmp.word_cnt; 12764 } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE); 12765 12766 data_size = offset; 12767 offset = 0; 12768 12769 if (!data_size) 12770 goto out; 12771 12772 /* Check the region signature first */ 12773 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) { 12774 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12775 "2619 Config region 23 has bad signature\n"); 12776 goto out; 12777 } 12778 offset += 4; 12779 12780 /* Check the data structure version */ 12781 if (rgn23_data[offset] != LPFC_REGION23_VERSION) { 12782 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12783 "2620 Config region 23 has bad version\n"); 12784 goto out; 12785 } 12786 offset += 4; 12787 12788 /* Parse TLV entries in the region */ 12789 while (offset < data_size) { 12790 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) 12791 break; 12792 /* 12793 * If the TLV is not driver specific TLV or driver id is 12794 * not linux driver id, skip the record. 12795 */ 12796 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) || 12797 (rgn23_data[offset + 2] != LINUX_DRIVER_ID) || 12798 (rgn23_data[offset + 3] != 0)) { 12799 offset += rgn23_data[offset + 1] * 4 + 4; 12800 continue; 12801 } 12802 12803 /* Driver found a driver specific TLV in the config region */ 12804 sub_tlv_len = rgn23_data[offset + 1] * 4; 12805 offset += 4; 12806 tlv_offset = 0; 12807 12808 /* 12809 * Search for configured port state sub-TLV. 
12810 */ 12811 while ((offset < data_size) && 12812 (tlv_offset < sub_tlv_len)) { 12813 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) { 12814 offset += 4; 12815 tlv_offset += 4; 12816 break; 12817 } 12818 if (rgn23_data[offset] != PORT_STE_TYPE) { 12819 offset += rgn23_data[offset + 1] * 4 + 4; 12820 tlv_offset += rgn23_data[offset + 1] * 4 + 4; 12821 continue; 12822 } 12823 12824 /* This HBA contains PORT_STE configured */ 12825 if (!rgn23_data[offset + 2]) 12826 phba->hba_flag |= LINK_DISABLED; 12827 12828 goto out; 12829 } 12830 } 12831 out: 12832 if (pmb) 12833 mempool_free(pmb, phba->mbox_mem_pool); 12834 kfree(rgn23_data); 12835 return; 12836 } 12837 12838 /** 12839 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands. 12840 * @vport: pointer to vport data structure. 12841 * 12842 * This function iterate through the mailboxq and clean up all REG_LOGIN 12843 * and REG_VPI mailbox commands associated with the vport. This function 12844 * is called when driver want to restart discovery of the vport due to 12845 * a Clear Virtual Link event. 12846 **/ 12847 void 12848 lpfc_cleanup_pending_mbox(struct lpfc_vport *vport) 12849 { 12850 struct lpfc_hba *phba = vport->phba; 12851 LPFC_MBOXQ_t *mb, *nextmb; 12852 struct lpfc_dmabuf *mp; 12853 struct lpfc_nodelist *ndlp; 12854 struct lpfc_nodelist *act_mbx_ndlp = NULL; 12855 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 12856 LIST_HEAD(mbox_cmd_list); 12857 12858 /* Clean up internally queued mailbox commands with the vport */ 12859 spin_lock_irq(&phba->hbalock); 12860 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { 12861 if (mb->vport != vport) 12862 continue; 12863 12864 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) && 12865 (mb->u.mb.mbxCommand != MBX_REG_VPI)) 12866 continue; 12867 12868 list_del(&mb->list); 12869 list_add_tail(&mb->list, &mbox_cmd_list); 12870 } 12871 /* Clean up active mailbox command with the vport */ 12872 mb = phba->sli.mbox_active; 12873 if (mb && (mb->vport == vport)) { 12874 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) || 12875 (mb->u.mb.mbxCommand == MBX_REG_VPI)) 12876 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 12877 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 12878 act_mbx_ndlp = (struct lpfc_nodelist *)mb->context2; 12879 /* Put reference count for delayed processing */ 12880 act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp); 12881 /* Unregister the RPI when mailbox complete */ 12882 mb->mbox_flag |= LPFC_MBX_IMED_UNREG; 12883 } 12884 } 12885 spin_unlock_irq(&phba->hbalock); 12886 12887 /* Release the cleaned-up mailbox commands */ 12888 while (!list_empty(&mbox_cmd_list)) { 12889 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list); 12890 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 12891 if (phba->sli_rev == LPFC_SLI_REV4) 12892 __lpfc_sli4_free_rpi(phba, 12893 mb->u.mb.un.varRegLogin.rpi); 12894 mp = (struct lpfc_dmabuf *) (mb->context1); 12895 if (mp) { 12896 __lpfc_mbuf_free(phba, mp->virt, mp->phys); 12897 kfree(mp); 12898 } 12899 ndlp = (struct lpfc_nodelist *) mb->context2; 12900 mb->context2 = NULL; 12901 if (ndlp) { 12902 spin_lock(shost->host_lock); 12903 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; 12904 spin_unlock(shost->host_lock); 12905 lpfc_nlp_put(ndlp); 12906 } 12907 } 12908 mempool_free(mb, phba->mbox_mem_pool); 12909 } 12910 12911 /* Release the ndlp with the cleaned-up active mailbox command */ 12912 if (act_mbx_ndlp) { 12913 spin_lock(shost->host_lock); 12914 act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; 12915 spin_unlock(shost->host_lock); 12916 
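		/* Drop the reference taken above for delayed processing */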
lpfc_nlp_put(act_mbx_ndlp); 12917 } 12918 } 12919 12920 /** 12921 * lpfc_drain_txq - Drain the txq 12922 * @phba: Pointer to HBA context object. 12923 * 12924 * This function attempt to submit IOCBs on the txq 12925 * to the adapter. For SLI4 adapters, the txq contains 12926 * ELS IOCBs that have been deferred because the there 12927 * are no SGLs. This congestion can occur with large 12928 * vport counts during node discovery. 12929 **/ 12930 12931 uint32_t 12932 lpfc_drain_txq(struct lpfc_hba *phba) 12933 { 12934 LIST_HEAD(completions); 12935 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 12936 struct lpfc_iocbq *piocbq = 0; 12937 unsigned long iflags = 0; 12938 char *fail_msg = NULL; 12939 struct lpfc_sglq *sglq; 12940 union lpfc_wqe wqe; 12941 12942 spin_lock_irqsave(&phba->hbalock, iflags); 12943 if (pring->txq_cnt > pring->txq_max) 12944 pring->txq_max = pring->txq_cnt; 12945 12946 spin_unlock_irqrestore(&phba->hbalock, iflags); 12947 12948 while (pring->txq_cnt) { 12949 spin_lock_irqsave(&phba->hbalock, iflags); 12950 12951 sglq = __lpfc_sli_get_sglq(phba); 12952 if (!sglq) { 12953 spin_unlock_irqrestore(&phba->hbalock, iflags); 12954 break; 12955 } else { 12956 piocbq = lpfc_sli_ringtx_get(phba, pring); 12957 if (!piocbq) { 12958 /* The txq_cnt out of sync. This should 12959 * never happen 12960 */ 12961 sglq = __lpfc_clear_active_sglq(phba, 12962 sglq->sli4_xritag); 12963 spin_unlock_irqrestore(&phba->hbalock, iflags); 12964 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12965 "2823 txq empty and txq_cnt is %d\n ", 12966 pring->txq_cnt); 12967 break; 12968 } 12969 } 12970 12971 /* The xri and iocb resources secured, 12972 * attempt to issue request 12973 */ 12974 piocbq->sli4_xritag = sglq->sli4_xritag; 12975 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq)) 12976 fail_msg = "to convert bpl to sgl"; 12977 else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe)) 12978 fail_msg = "to convert iocb to wqe"; 12979 else if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe)) 12980 fail_msg = " - Wq is full"; 12981 else 12982 lpfc_sli_ringtxcmpl_put(phba, pring, piocbq); 12983 12984 if (fail_msg) { 12985 /* Failed means we can't issue and need to cancel */ 12986 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12987 "2822 IOCB failed %s iotag 0x%x " 12988 "xri 0x%x\n", 12989 fail_msg, 12990 piocbq->iotag, piocbq->sli4_xritag); 12991 list_add_tail(&piocbq->list, &completions); 12992 } 12993 spin_unlock_irqrestore(&phba->hbalock, iflags); 12994 } 12995 12996 /* Cancel all the IOCBs that cannot be issued */ 12997 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 12998 IOERR_SLI_ABORTED); 12999 13000 return pring->txq_cnt; 13001 } 13002
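
/*
 * lpfc_drain_txq() returns the number of IOCBs still parked on the ELS
 * txq, so a hypothetical caller that has just freed SGL/XRI resources
 * could use the return value (illustrative sketch, not actual driver
 * code) to decide whether another drain attempt is worthwhile:
 *
 *	if (lpfc_drain_txq(phba))
 *		still_congested = true;
 */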