/*
 * This file is part of the Chelsio FCoE driver for Linux.
 *
 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/string.h>

#include "csio_init.h"
#include "csio_hw.h"

/*
 * csio_nondata_isr() - Non-data MSIX interrupt handler.
 * @irq: Interrupt number.
 * @dev_id: The HW module.
 *
 * Handles slow-path interrupts and mailbox completions, and defers
 * firmware event processing to the event worker when required.
 */
static irqreturn_t
csio_nondata_isr(int irq, void *dev_id)
{
	struct csio_hw *hw = (struct csio_hw *) dev_id;
	int rv;
	unsigned long flags;

	if (unlikely(!hw))
		return IRQ_NONE;

	if (unlikely(pci_channel_offline(hw->pdev))) {
		CSIO_INC_STATS(hw, n_pcich_offline);
		return IRQ_NONE;
	}

	spin_lock_irqsave(&hw->lock, flags);
	csio_hw_slow_intr_handler(hw);
	rv = csio_mb_isr_handler(hw);

	if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
		hw->flags |= CSIO_HWF_FWEVT_PENDING;
		spin_unlock_irqrestore(&hw->lock, flags);
		schedule_work(&hw->evtq_work);
		return IRQ_HANDLED;
	}
	spin_unlock_irqrestore(&hw->lock, flags);
	return IRQ_HANDLED;
}

/*
 * csio_fwevt_handler - Common FW event handler routine.
 * @hw: HW module.
 *
 * This is the ISR for FW events. It is shared between the MSIX
 * and INTx handlers.
 */
static void
csio_fwevt_handler(struct csio_hw *hw)
{
	int rv;
	unsigned long flags;

	rv = csio_fwevtq_handler(hw);

	spin_lock_irqsave(&hw->lock, flags);
	if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
		hw->flags |= CSIO_HWF_FWEVT_PENDING;
		spin_unlock_irqrestore(&hw->lock, flags);
		schedule_work(&hw->evtq_work);
		return;
	}
	spin_unlock_irqrestore(&hw->lock, flags);

} /* csio_fwevt_handler */
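/*
 * Illustrative sketch (an assumption; the real worker lives elsewhere
 * in the driver): the CSIO_HWF_FWEVT_PENDING handshake above relies on
 * the handler behind hw->evtq_work draining the event queue and then
 * clearing the flag under hw->lock, roughly:
 *
 *	static void example_evtq_worker(struct work_struct *work)
 *	{
 *		struct csio_hw *hw = container_of(work, struct csio_hw,
 *						  evtq_work);
 *		unsigned long flags;
 *
 *		(process queued firmware events here)
 *
 *		spin_lock_irqsave(&hw->lock, flags);
 *		hw->flags &= ~CSIO_HWF_FWEVT_PENDING;
 *		spin_unlock_irqrestore(&hw->lock, flags);
 *	}
 *
 * The flag simply prevents re-scheduling the work while a previous run
 * is still pending.
 */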
/*
 * csio_fwevt_isr() - FW events MSIX ISR.
 * @irq: Interrupt number.
 * @dev_id: The HW module.
 *
 * Process WRs on the FW event queue.
 */
static irqreturn_t
csio_fwevt_isr(int irq, void *dev_id)
{
	struct csio_hw *hw = (struct csio_hw *) dev_id;

	if (unlikely(!hw))
		return IRQ_NONE;

	if (unlikely(pci_channel_offline(hw->pdev))) {
		CSIO_INC_STATS(hw, n_pcich_offline);
		return IRQ_NONE;
	}

	csio_fwevt_handler(hw);

	return IRQ_HANDLED;
}

/*
 * csio_fwevt_intx_handler() - INTx wrapper for handling FW events.
 * @hw: HW module.
 * @wr: The work request.
 * @len: Length of the WR.
 * @flb: Freelist buffer array.
 * @priv: Private data, unused here.
 */
void
csio_fwevt_intx_handler(struct csio_hw *hw, void *wr, uint32_t len,
			struct csio_fl_dma_buf *flb, void *priv)
{
	csio_fwevt_handler(hw);
} /* csio_fwevt_intx_handler */

/*
 * csio_process_scsi_cmpl - Process a SCSI WR completion.
 * @hw: HW module.
 * @wr: The completed WR from the ingress queue.
 * @len: Length of the WR.
 * @flb: Freelist buffer array.
 * @cbfn_q: Completion queue onto which completed ioreqs are chained.
 */
static void
csio_process_scsi_cmpl(struct csio_hw *hw, void *wr, uint32_t len,
		       struct csio_fl_dma_buf *flb, void *cbfn_q)
{
	struct csio_ioreq *ioreq;
	uint8_t *scsiwr;
	uint8_t subop;
	void *cmnd;
	unsigned long flags;

	ioreq = csio_scsi_cmpl_handler(hw, wr, len, flb, NULL, &scsiwr);
	if (likely(ioreq)) {
		if (unlikely(*scsiwr == FW_SCSI_ABRT_CLS_WR)) {
			subop = FW_SCSI_ABRT_CLS_WR_SUB_OPCODE_GET(
					((struct fw_scsi_abrt_cls_wr *)
					    scsiwr)->sub_opcode_to_chk_all_io);

			csio_dbg(hw, "%s cmpl recvd ioreq:%p status:%d\n",
				 subop ? "Close" : "Abort",
				 ioreq, ioreq->wr_status);

			spin_lock_irqsave(&hw->lock, flags);
			if (subop)
				csio_scsi_closed(ioreq,
						 (struct list_head *)cbfn_q);
			else
				csio_scsi_aborted(ioreq,
						  (struct list_head *)cbfn_q);
			/*
			 * We call scsi_done for I/Os whose aborts the driver
			 * thinks have timed out. If the FW completes an abort
			 * at the exact moment the driver has detected the
			 * abort timeout, the following check prevents calling
			 * scsi_done twice for the same command: once from the
			 * eh_abort_handler, and again from
			 * csio_scsi_isr_handler(). This also avoids the need
			 * to check whether csio_scsi_cmnd(req) is NULL in the
			 * fast path.
			 */
			cmnd = csio_scsi_cmnd(ioreq);
			if (unlikely(cmnd == NULL))
				list_del_init(&ioreq->sm.sm_list);

			spin_unlock_irqrestore(&hw->lock, flags);

			if (unlikely(cmnd == NULL))
				csio_put_scsi_ioreq_lock(hw,
						csio_hw_to_scsim(hw), ioreq);
		} else {
			spin_lock_irqsave(&hw->lock, flags);
			csio_scsi_completed(ioreq, (struct list_head *)cbfn_q);
			spin_unlock_irqrestore(&hw->lock, flags);
		}
	}
}
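/*
 * Illustrative sketch (an assumption, not code from this driver): the
 * cmnd == NULL check in csio_process_scsi_cmpl() pairs with an
 * eh_abort_handler that, on abort timeout, detaches the scsi_cmnd from
 * the ioreq under hw->lock before completing it, conceptually:
 *
 *	spin_lock_irqsave(&hw->lock, flags);
 *	cmnd = csio_scsi_cmnd(ioreq);
 *	(detach cmnd from ioreq, so csio_scsi_cmnd() now yields NULL)
 *	spin_unlock_irqrestore(&hw->lock, flags);
 *	if (cmnd)
 *		(complete cmnd to the SCSI midlayer)
 *
 * Because both paths inspect the pointer under the same lock, exactly
 * one of them completes the command, never both.
 */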
/*
 * csio_scsi_isr_handler() - Common SCSI ISR handler.
 * @iq: Ingress queue pointer.
 *
 * Processes SCSI completions on the SCSI IQ indicated by scm->iq_idx
 * by calling csio_wr_process_iq_idx. If there are completions on the
 * isr_cbfn_q, yank them out into a local queue and call their io_cbfns.
 * Once done, add these completions onto the freelist.
 * This routine is shared between the MSIX and INTx handlers.
 */
static inline irqreturn_t
csio_scsi_isr_handler(struct csio_q *iq)
{
	struct csio_hw *hw = (struct csio_hw *)iq->owner;
	LIST_HEAD(cbfn_q);
	struct list_head *tmp;
	struct csio_scsim *scm;
	struct csio_ioreq *ioreq;
	int isr_completions = 0;

	scm = csio_hw_to_scsim(hw);

	if (unlikely(csio_wr_process_iq(hw, iq, csio_process_scsi_cmpl,
					&cbfn_q) != 0))
		return IRQ_NONE;

	/* Call back the completion routines */
	list_for_each(tmp, &cbfn_q) {
		ioreq = (struct csio_ioreq *)tmp;
		isr_completions++;
		ioreq->io_cbfn(hw, ioreq);
		/* Release ddp buffer if used for this req */
		if (unlikely(ioreq->dcopy))
			csio_put_scsi_ddp_list_lock(hw, scm, &ioreq->gen_list,
						    ioreq->nsge);
	}

	if (isr_completions) {
		/* Return the ioreqs back to ioreq->freelist */
		csio_put_scsi_ioreq_list_lock(hw, scm, &cbfn_q,
					      isr_completions);
	}

	return IRQ_HANDLED;
}

/*
 * csio_scsi_isr() - SCSI MSIX handler.
 * @irq: Interrupt number.
 * @dev_id: The SCSI ingress queue.
 *
 * This is the top level SCSI MSIX handler. Calls csio_scsi_isr_handler()
 * for handling SCSI completions.
 */
static irqreturn_t
csio_scsi_isr(int irq, void *dev_id)
{
	struct csio_q *iq = (struct csio_q *) dev_id;
	struct csio_hw *hw;

	if (unlikely(!iq))
		return IRQ_NONE;

	hw = (struct csio_hw *)iq->owner;

	if (unlikely(pci_channel_offline(hw->pdev))) {
		CSIO_INC_STATS(hw, n_pcich_offline);
		return IRQ_NONE;
	}

	csio_scsi_isr_handler(iq);

	return IRQ_HANDLED;
}

/*
 * csio_scsi_intx_handler() - SCSI INTx handler.
 * @hw: HW module.
 * @wr: The work request.
 * @len: Length of the WR.
 * @flb: Freelist buffer array.
 * @priv: The SCSI ingress queue.
 *
 * This is the top level SCSI INTx handler. Calls csio_scsi_isr_handler()
 * for handling SCSI completions.
 */
void
csio_scsi_intx_handler(struct csio_hw *hw, void *wr, uint32_t len,
			struct csio_fl_dma_buf *flb, void *priv)
{
	struct csio_q *iq = priv;

	csio_scsi_isr_handler(iq);

} /* csio_scsi_intx_handler */
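/*
 * Note on dispatch: in MSIX mode each SCSI ingress queue gets its own
 * vector, registered in csio_request_irqs() below with the csio_q as
 * dev_id, so csio_scsi_isr() recovers its queue without any lookup.
 * In INTx mode all queues funnel through the single interrupt line,
 * and csio_scsi_intx_handler() is instead invoked as an IQ message
 * callback with the queue passed via @priv.
 */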
/*
 * csio_fcoe_isr() - INTx/MSI interrupt service routine for FCoE.
 * @irq: Interrupt number.
 * @dev_id: The HW module.
 *
 * Handles slow-path interrupts, the INTx forward interrupt queue and
 * mailbox completions on the single shared interrupt line.
 */
static irqreturn_t
csio_fcoe_isr(int irq, void *dev_id)
{
	struct csio_hw *hw = (struct csio_hw *) dev_id;
	struct csio_q *intx_q = NULL;
	int rv;
	irqreturn_t ret = IRQ_NONE;
	unsigned long flags;

	if (unlikely(!hw))
		return IRQ_NONE;

	if (unlikely(pci_channel_offline(hw->pdev))) {
		CSIO_INC_STATS(hw, n_pcich_offline);
		return IRQ_NONE;
	}

	/* Disable the interrupt for this PCI function. */
	if (hw->intr_mode == CSIO_IM_INTX)
		csio_wr_reg32(hw, 0, MYPF_REG(PCIE_PF_CLI));

	/*
	 * The read in the following function will flush the
	 * above write.
	 */
	if (csio_hw_slow_intr_handler(hw))
		ret = IRQ_HANDLED;

	/* Get the INTx Forward interrupt IQ. */
	intx_q = csio_get_q(hw, hw->intr_iq_idx);

	CSIO_DB_ASSERT(intx_q);

	/* IQ handler is not possible for intx_q, hence pass in NULL */
	if (likely(csio_wr_process_iq(hw, intx_q, NULL, NULL) == 0))
		ret = IRQ_HANDLED;

	spin_lock_irqsave(&hw->lock, flags);
	rv = csio_mb_isr_handler(hw);
	if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
		hw->flags |= CSIO_HWF_FWEVT_PENDING;
		spin_unlock_irqrestore(&hw->lock, flags);
		schedule_work(&hw->evtq_work);
		return IRQ_HANDLED;
	}
	spin_unlock_irqrestore(&hw->lock, flags);

	return ret;
}

/* Build human-readable descriptions for this function's MSIX vectors. */
static void
csio_add_msix_desc(struct csio_hw *hw)
{
	int i;
	struct csio_msix_entries *entryp = &hw->msix_entries[0];
	int k = CSIO_EXTRA_VECS;
	int len = sizeof(entryp->desc) - 1;
	int cnt = hw->num_sqsets + k;

	/* Non-data vector */
	memset(entryp->desc, 0, len + 1);
	snprintf(entryp->desc, len, "csio-%02x:%02x:%x-nondata",
		 CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw), CSIO_PCI_FUNC(hw));

	entryp++;
	memset(entryp->desc, 0, len + 1);
	snprintf(entryp->desc, len, "csio-%02x:%02x:%x-fwevt",
		 CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw), CSIO_PCI_FUNC(hw));
	entryp++;

	/* Name SCSI vecs */
	for (i = k; i < cnt; i++, entryp++) {
		memset(entryp->desc, 0, len + 1);
		snprintf(entryp->desc, len, "csio-%02x:%02x:%x-scsi%d",
			 CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw),
			 CSIO_PCI_FUNC(hw), i - CSIO_EXTRA_VECS);
	}
}
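/*
 * For a function at PCI address 02:00.1 with two SCSI queue sets, the
 * descriptions built above would read (sample values for illustration):
 *
 *	csio-02:00:1-nondata
 *	csio-02:00:1-fwevt
 *	csio-02:00:1-scsi0
 *	csio-02:00:1-scsi1
 *
 * These are the names that appear against each vector in
 * /proc/interrupts.
 */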
int
csio_request_irqs(struct csio_hw *hw)
{
	int rv, i, j, k = 0;
	struct csio_msix_entries *entryp = &hw->msix_entries[0];
	struct csio_scsi_cpu_info *info;

	if (hw->intr_mode != CSIO_IM_MSIX) {
		rv = request_irq(hw->pdev->irq, csio_fcoe_isr,
				 (hw->intr_mode == CSIO_IM_MSI) ?
							0 : IRQF_SHARED,
				 KBUILD_MODNAME, hw);
		if (rv) {
			if (hw->intr_mode == CSIO_IM_MSI)
				pci_disable_msi(hw->pdev);
			csio_err(hw, "Failed to allocate interrupt line.\n");
			return -EINVAL;
		}

		goto out;
	}

	/* Add the MSIX vector descriptions */
	csio_add_msix_desc(hw);

	rv = request_irq(entryp[k].vector, csio_nondata_isr, 0,
			 entryp[k].desc, hw);
	if (rv) {
		csio_err(hw, "IRQ request failed for vec %d err:%d\n",
			 entryp[k].vector, rv);
		goto err;
	}

	entryp[k++].dev_id = (void *)hw;

	rv = request_irq(entryp[k].vector, csio_fwevt_isr, 0,
			 entryp[k].desc, hw);
	if (rv) {
		csio_err(hw, "IRQ request failed for vec %d err:%d\n",
			 entryp[k].vector, rv);
		goto err;
	}

	entryp[k++].dev_id = (void *)hw;

	/* Allocate IRQs for SCSI */
	for (i = 0; i < hw->num_pports; i++) {
		info = &hw->scsi_cpu_info[i];
		for (j = 0; j < info->max_cpus; j++, k++) {
			struct csio_scsi_qset *sqset = &hw->sqset[i][j];
			struct csio_q *q = hw->wrm.q_arr[sqset->iq_idx];

			rv = request_irq(entryp[k].vector, csio_scsi_isr, 0,
					 entryp[k].desc, q);
			if (rv) {
				csio_err(hw,
				       "IRQ request failed for vec %d err:%d\n",
				       entryp[k].vector, rv);
				goto err;
			}

			entryp[k].dev_id = (void *)q;

		} /* for all scsi cpus */
	} /* for all ports */

out:
	hw->flags |= CSIO_HWF_HOST_INTR_ENABLED;

	return 0;

err:
	for (i = 0; i < k; i++) {
		entryp = &hw->msix_entries[i];
		free_irq(entryp->vector, entryp->dev_id);
	}
	pci_disable_msix(hw->pdev);

	return -EINVAL;
}

static void
csio_disable_msix(struct csio_hw *hw, bool free)
{
	int i;
	struct csio_msix_entries *entryp;
	int cnt = hw->num_sqsets + CSIO_EXTRA_VECS;

	if (free) {
		for (i = 0; i < cnt; i++) {
			entryp = &hw->msix_entries[i];
			free_irq(entryp->vector, entryp->dev_id);
		}
	}
	pci_disable_msix(hw->pdev);
}

/* Reduce per-port max possible CPUs */
static void
csio_reduce_sqsets(struct csio_hw *hw, int cnt)
{
	int i;
	struct csio_scsi_cpu_info *info;

	while (cnt < hw->num_sqsets) {
		for (i = 0; i < hw->num_pports; i++) {
			info = &hw->scsi_cpu_info[i];
			if (info->max_cpus > 1) {
				info->max_cpus--;
				hw->num_sqsets--;
				if (hw->num_sqsets <= cnt)
					break;
			}
		}
	}

	csio_dbg(hw, "Reduced sqsets to %d\n", hw->num_sqsets);
}
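/*
 * Note on the retry loop in csio_enable_msix() below: the legacy
 * pci_enable_msix() returns 0 on success, a positive count of the
 * vectors actually available when the request cannot be met in full,
 * or a negative errno. The loop therefore retries with the returned
 * (smaller) count until it succeeds or falls below the minimum usable
 * number of vectors. A sketch of the equivalent on kernels that
 * provide the range API (an assumption about the target kernel):
 *
 *	rv = pci_enable_msix_range(hw->pdev, entries, min, cnt);
 *	(rv < 0 is failure; otherwise rv vectors were allocated)
 */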
static int
csio_enable_msix(struct csio_hw *hw)
{
	int rv, i, j, k, n, min, cnt;
	struct csio_msix_entries *entryp;
	struct msix_entry *entries;
	int extra = CSIO_EXTRA_VECS;
	struct csio_scsi_cpu_info *info;

	min = hw->num_pports + extra;
	cnt = hw->num_sqsets + extra;

	/* Max vectors required based on #niqs configured in fw */
	if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS || !csio_is_hw_master(hw))
		cnt = min_t(uint8_t, hw->cfg_niq, cnt);

	entries = kcalloc(cnt, sizeof(struct msix_entry), GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	for (i = 0; i < cnt; i++)
		entries[i].entry = (uint16_t)i;

	csio_dbg(hw, "FW supp #niq:%d, trying %d msix's\n", hw->cfg_niq, cnt);

	while ((rv = pci_enable_msix(hw->pdev, entries, cnt)) >= min)
		cnt = rv;
	if (!rv) {
		if (cnt < (hw->num_sqsets + extra)) {
			csio_dbg(hw, "Reducing sqsets to %d\n", cnt - extra);
			csio_reduce_sqsets(hw, cnt - extra);
		}
	} else {
		if (rv > 0) {
			pci_disable_msix(hw->pdev);
			csio_info(hw, "Not using MSI-X, remainder:%d\n", rv);
		}

		kfree(entries);
		return -ENOMEM;
	}

	/* Save off vectors */
	for (i = 0; i < cnt; i++) {
		entryp = &hw->msix_entries[i];
		entryp->vector = entries[i].vector;
	}

	/* Distribute vectors */
	k = 0;
	csio_set_nondata_intr_idx(hw, entries[k].entry);
	csio_set_mb_intr_idx(csio_hw_to_mbm(hw), entries[k++].entry);
	csio_set_fwevt_intr_idx(hw, entries[k++].entry);

	for (i = 0; i < hw->num_pports; i++) {
		info = &hw->scsi_cpu_info[i];

		for (j = 0; j < hw->num_scsi_msix_cpus; j++) {
			n = (j % info->max_cpus) + k;
			hw->sqset[i][j].intr_idx = entries[n].entry;
		}

		k += info->max_cpus;
	}

	kfree(entries);
	return 0;
}

void
csio_intr_enable(struct csio_hw *hw)
{
	hw->intr_mode = CSIO_IM_NONE;
	hw->flags &= ~CSIO_HWF_HOST_INTR_ENABLED;

	/* Try MSIX, then MSI or fall back to INTx */
	if ((csio_msi == 2) && !csio_enable_msix(hw))
		hw->intr_mode = CSIO_IM_MSIX;
	else {
		/* Max iqs required based on #niqs configured in fw */
		if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS ||
			!csio_is_hw_master(hw)) {
			int extra = CSIO_EXTRA_MSI_IQS;

			if (hw->cfg_niq < (hw->num_sqsets + extra)) {
				csio_dbg(hw, "Reducing sqsets to %d\n",
					 hw->cfg_niq - extra);
				csio_reduce_sqsets(hw, hw->cfg_niq - extra);
			}
		}

		if ((csio_msi == 1) && !pci_enable_msi(hw->pdev))
			hw->intr_mode = CSIO_IM_MSI;
		else
			hw->intr_mode = CSIO_IM_INTX;
	}

	csio_dbg(hw, "Using %s interrupt mode.\n",
		(hw->intr_mode == CSIO_IM_MSIX) ? "MSIX" :
		((hw->intr_mode == CSIO_IM_MSI) ? "MSI" : "INTx"));
}

void
csio_intr_disable(struct csio_hw *hw, bool free)
{
	csio_hw_intr_disable(hw);

	switch (hw->intr_mode) {
	case CSIO_IM_MSIX:
		csio_disable_msix(hw, free);
		break;
	case CSIO_IM_MSI:
		if (free)
			free_irq(hw->pdev->irq, hw);
		pci_disable_msi(hw->pdev);
		break;
	case CSIO_IM_INTX:
		if (free)
			free_irq(hw->pdev->irq, hw);
		break;
	default:
		break;
	}
	hw->intr_mode = CSIO_IM_NONE;
	hw->flags &= ~CSIO_HWF_HOST_INTR_ENABLED;
}
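/*
 * Typical usage, sketched (the actual call sites are assumed to be the
 * probe and remove paths elsewhere in the driver):
 *
 *	csio_intr_enable(hw);		(pick MSIX/MSI/INTx, size sqsets)
 *	if (csio_request_irqs(hw))	(register the chosen handlers)
 *		goto bail;
 *	...
 *	csio_intr_disable(hw, true);	(free IRQs, disable MSIX/MSI)
 */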